2 /**************************************************************************
4 Copyright (c) 2007, Chelsio Inc.
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
13 2. Neither the name of the Chelsio Corporation nor the names of its
14 contributors may be used to endorse or promote products derived from
15 this software without specific prior written permission.
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 POSSIBILITY OF SUCH DAMAGE.
29 ***************************************************************************/
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <common/cxgb_common.h>
/* Enum member; the enclosing enum and its sibling constants are elided
 * from this listing. Presumably the ELMR FIFO size/limit — name-based,
 * confirm against the VSC7323 documentation. */
44 ELMR_FIFO_SZ = 0xe00d,
/*
 * Compose a VSC7323 register address from its three components:
 * reg in bits 7:0, subblock in bits 11:8, block in bits 15:12.
 */
50 #define VSC_REG(block, subblock, reg) \
51 ((reg) | ((subblock) << 8) | ((block) << 12))
/*
 * Write a block of n 32-bit values to consecutive ELMR registers
 * starting at address 'start', using the adapter's MDIO ops.  The
 * start address is latched once via ELMR_ADDR, then each value is
 * streamed as an ELMR_DATA_LO / ELMR_DATA_HI pair; the loop stops on
 * the first MDIO error (ret != 0).
 * NOTE(review): this listing is elided (source line numbers jump) —
 * the local declarations, any locking, the written data operands and
 * the function tail are not visible here.
 */
53 int t3_elmr_blk_write(adapter_t *adap, int start, const u32 *vals, int n)
56 const struct mdio_ops *mo = adapter_info(adap)->mdio_ops;
59 ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_ADDR, start);
60 for ( ; !ret && n; n--, vals++) {
61 ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_LO,
64 ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_HI,
/* Convenience wrapper: write a single 32-bit value to one ELMR register. */
71 static int elmr_write(adapter_t *adap, int addr, u32 val)
73 return t3_elmr_blk_write(adap, addr, &val, 1);
/*
 * Read a block of n 32-bit values from consecutive ELMR registers
 * starting at address 'start'.  After latching the start address via
 * ELMR_ADDR, ELMR_STAT is polled up to 5 times (presumably waiting for
 * a ready bit — the tested condition is elided from this listing),
 * then each value is assembled from an ELMR_DATA_LO / ELMR_DATA_HI
 * read pair, stopping on the first MDIO error.
 * NOTE(review): listing is elided — declarations, locking, the poll
 * exit condition and the HI-word combine are not visible here.
 */
76 int t3_elmr_blk_read(adapter_t *adap, int start, u32 *vals, int n)
80 const struct mdio_ops *mo = adapter_info(adap)->mdio_ops;
84 ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_ADDR, start);
88 for (i = 0; i < 5; i++) {
89 ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_STAT, &v);
101 for ( ; !ret && n; n--, vals++) {
102 ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_LO, vals);
104 ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_HI,
/* common exit: drop the ELMR lock taken earlier (acquisition elided) */
109 out: ELMR_UNLOCK(adap);
/*
 * One-time initialization of the VSC7323 for 'nports' ports:
 *   1. program the system-block register table (sys_avp),
 *   2. carve the ingress (0xc0 units) and egress (0x40 units) FIFO
 *      space evenly across the ports,
 *   3. program the FIFO (fifo_avp) and 10G MAC (xg_avp) tables,
 *   4. per-port MAC setup plus an ELMR_THRES0/ELMR_BW configuration.
 * Returns 0 on success or the first failing write's error code.
 * NOTE(review): listing is elided — error-goto targets, the per-port
 * bottom-pointer advancement (ing_bot/egr_bot updates) and several
 * write operands are not visible here.
 */
113 int t3_vsc7323_init(adapter_t *adap, int nports)
/* system-block setup values, written verbatim via t3_elmr_blk_write */
115 static struct addr_val_pair sys_avp[] = {
116 { VSC_REG(7, 15, 0xf), 2 },
117 { VSC_REG(7, 15, 0x19), 0xd6 },
118 { VSC_REG(7, 15, 7), 0xc },
119 { VSC_REG(7, 1, 0), 0x220 },
/* FIFO-block (block 2) setup values */
121 static struct addr_val_pair fifo_avp[] = {
122 { VSC_REG(2, 0, 0x2f), 0 },
123 { VSC_REG(2, 0, 0xf), 0xa0010291 },
124 { VSC_REG(2, 1, 0x2f), 1 },
125 { VSC_REG(2, 1, 0xf), 0xa026301 }
/* 10G MAC (block 1, subblock 10) setup values */
127 static struct addr_val_pair xg_avp[] = {
128 { VSC_REG(1, 10, 0), 0x600b },
129 { VSC_REG(1, 10, 1), 0x70600 }, //QUANTA = 96*1024*8/512
130 { VSC_REG(1, 10, 2), 0x2710 },
131 { VSC_REG(1, 10, 5), 0x65 },
132 { VSC_REG(1, 10, 7), 0x23 },
/* reg 0x23 is deliberately written three times (toggle of bit 31) —
 * presumably a reset/strobe sequence; confirm against the datasheet */
133 { VSC_REG(1, 10, 0x23), 0x800007bf },
134 { VSC_REG(1, 10, 0x23), 0x000007bf },
135 { VSC_REG(1, 10, 0x23), 0x800007bf },
136 { VSC_REG(1, 10, 0x24), 4 }
139 int i, ret, ing_step, egr_step, ing_bot, egr_bot;
141 for (i = 0; i < ARRAY_SIZE(sys_avp); i++)
142 if ((ret = t3_elmr_blk_write(adap, sys_avp[i].reg_addr,
143 &sys_avp[i].val, 1)))
/* split ingress/egress FIFO space evenly across the ports */
146 ing_step = 0xc0 / nports;
147 egr_step = 0x40 / nports;
148 ing_bot = egr_bot = 0;
149 // ing_wm = ing_step * 64;
150 // egr_wm = egr_step * 64;
152 /* {ING,EGR}_CONTROL.CLR = 1 here */
/* per-port FIFO window: [bot, bot+step) packed as (top << 16) | bot */
153 for (i = 0; i < nports; i++) {
155 (ret = elmr_write(adap, VSC_REG(2, 0, 0x10 + i),
156 ((ing_bot + ing_step) << 16) | ing_bot)) ||
157 (ret = elmr_write(adap, VSC_REG(2, 0, 0x40 + i),
159 (ret = elmr_write(adap, VSC_REG(2, 0, 0x50 + i), 1)) ||
160 (ret = elmr_write(adap, VSC_REG(2, 1, 0x10 + i),
161 ((egr_bot + egr_step) << 16) | egr_bot)) ||
162 (ret = elmr_write(adap, VSC_REG(2, 1, 0x40 + i),
164 (ret = elmr_write(adap, VSC_REG(2, 1, 0x50 + i), 0)))
170 for (i = 0; i < ARRAY_SIZE(fifo_avp); i++)
171 if ((ret = t3_elmr_blk_write(adap, fifo_avp[i].reg_addr,
172 &fifo_avp[i].val, 1)))
175 for (i = 0; i < ARRAY_SIZE(xg_avp); i++)
176 if ((ret = t3_elmr_blk_write(adap, xg_avp[i].reg_addr,
/* per-port MAC registers and drop threshold; errors abort the loop */
180 for (i = 0; i < nports; i++)
181 if ((ret = elmr_write(adap, VSC_REG(1, i, 0), 0xa59c)) ||
182 (ret = elmr_write(adap, VSC_REG(1, i, 5),
183 (i << 12) | 0x63)) ||
184 (ret = elmr_write(adap, VSC_REG(1, i, 0xb), 0x96)) ||
185 (ret = elmr_write(adap, VSC_REG(1, i, 0x15), 0x21)) ||
186 (ret = elmr_write(adap, ELMR_THRES0 + i, 768)))
189 if ((ret = elmr_write(adap, ELMR_BW, 7)))
/*
 * Configure a port's speed and flow control.  'speed' selects a mode
 * and clock pair (the assignments for mode/clk are elided from this
 * listing; only the SPEED_10/100/1000 dispatch is visible), which is
 * then programmed through a four-write sequence on MAC regs 0 and 0xb
 * (reg 0xb is written with bit 0 set, then cleared — presumably a
 * clock/reset strobe; confirm against the datasheet).  Finally the
 * pause register (reg 1) is written: the Rx-pause bit in 'fc' selects
 * between two QUANTA encodings.
 * Returns 0 on success or the first failing write's error code.
 */
195 int t3_vsc7323_set_speed_fc(adapter_t *adap, int speed, int fc, int port)
200 if (speed == SPEED_10)
202 else if (speed == SPEED_100)
204 else if (speed == SPEED_1000)
209 if ((r = elmr_write(adap, VSC_REG(1, port, 0),
210 0xa590 | (mode << 2))) ||
211 (r = elmr_write(adap, VSC_REG(1, port, 0xb),
212 0x91 | (clk << 1))) ||
213 (r = elmr_write(adap, VSC_REG(1, port, 0xb),
214 0x90 | (clk << 1))) ||
215 (r = elmr_write(adap, VSC_REG(1, port, 0),
216 0xa593 | (mode << 2))))
220 r = (fc & PAUSE_RX) ? 0x60200 : 0x20200; //QUANTA = 32*1024*8/512
223 return elmr_write(adap, VSC_REG(1, port, 1), r);
/* Program a port's maximum frame length (MAC block reg 2) to 'mtu'. */
226 int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port)
228 return elmr_write(adap, VSC_REG(1, port, 2), mtu);
/*
 * Program a port's 6-byte MAC address, split into two 24-bit register
 * writes: bytes 0-2 into MAC reg 3, bytes 3-5 into MAC reg 4, each
 * packed most-significant byte first.
 * NOTE(review): listing is elided — the error check between the two
 * writes and the return are not visible here.
 */
231 int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port)
235 ret = elmr_write(adap, VSC_REG(1, port, 3),
236 (addr[0] << 16) | (addr[1] << 8) | addr[2]);
238 ret = elmr_write(adap, VSC_REG(1, port, 4),
239 (addr[3] << 16) | (addr[4] << 8) | addr[5]);
/*
 * Enable a port's MAC Tx and/or Rx path (per 'which' direction flags)
 * with a read-modify-write of MAC control reg 0: read the current
 * value, set the relevant enable bits (the bit manipulation lines are
 * elided from this listing), and write it back only if it changed
 * ('orig' presumably holds the pre-modification value for that
 * comparison — confirm once the full source is available).
 */
243 int t3_vsc7323_enable(adapter_t *adap, int port, int which)
246 unsigned int v, orig;
248 ret = t3_elmr_blk_read(adap, VSC_REG(1, port, 0), &v, 1);
251 if (which & MAC_DIRECTION_TX)
253 if (which & MAC_DIRECTION_RX)
256 ret = elmr_write(adap, VSC_REG(1, port, 0), v);
/*
 * Disable a port's MAC Tx and/or Rx path (per 'which' direction
 * flags); mirror image of t3_vsc7323_enable — read-modify-write of
 * MAC control reg 0, clearing the relevant bits (bit manipulation
 * lines are elided from this listing).
 */
261 int t3_vsc7323_disable(adapter_t *adap, int port, int which)
264 unsigned int v, orig;
266 ret = t3_elmr_blk_read(adap, VSC_REG(1, port, 0), &v, 1);
269 if (which & MAC_DIRECTION_TX)
271 if (which & MAC_DIRECTION_RX)
274 ret = elmr_write(adap, VSC_REG(1, port, 0), v);
/*
 * Hardware statistics layout: two contiguous counter groups per port,
 * group 0 spanning registers [STATS0_START, 0x1d] and group 1 spanning
 * [STATS1_START, 0x2a].  ELMR_STAT() maps (port, reg) to the absolute
 * ELMR address of a counter, with a 0x40-register stride per port.
 */
279 #define STATS0_START 1
280 #define STATS1_START 0x24
281 #define NSTATS0 (0x1d - STATS0_START + 1)
282 #define NSTATS1 (0x2a - STATS1_START + 1)
284 #define ELMR_STAT(port, reg) (ELMR_STATS + port * 0x40 + reg)
/*
 * Refresh the software MAC statistics from the hardware counters.
 * Reads both per-port counter groups in bulk, then folds each 32-bit
 * hardware counter into its 64-bit software accumulator.  The
 * RMON_UPDATE idiom — add the unsigned 32-bit difference between the
 * hardware value and the low word of the accumulator — is what makes
 * the 32-bit counters wrap-safe.
 *
 * The hardware counts only unicast frames, so total rx/tx frame counts
 * are reconstructed: derive the current unicast totals from the stored
 * frames/mcast/bcast values, wrap-extend them against the hardware
 * unicast counters, then rebuild frames = ucast + mcast + bcast after
 * the mcast/bcast accumulators have been updated.
 *
 * Returns a pointer to the (updated) mac->stats; on a read error the
 * elided error path presumably jumps to 'out' and returns the stats
 * unchanged — confirm once the full source is available.
 * NOTE(review): listing is elided — the t3_elmr_blk_read count/error
 * arguments and the locking are not visible here.
 */
286 const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac)
289 u64 rx_ucast, tx_ucast;
290 u32 stats0[NSTATS0], stats1[NSTATS1];
292 ret = t3_elmr_blk_read(mac->adapter,
293 ELMR_STAT(mac->ext_port, STATS0_START),
296 ret = t3_elmr_blk_read(mac->adapter,
297 ELMR_STAT(mac->ext_port, STATS1_START),
303 * HW counts Rx/Tx unicast frames but we want all the frames.
305 rx_ucast = mac->stats.rx_frames - mac->stats.rx_mcast_frames -
306 mac->stats.rx_bcast_frames;
307 rx_ucast += (u64)(stats0[6 - STATS0_START] - (u32)rx_ucast);
308 tx_ucast = mac->stats.tx_frames - mac->stats.tx_mcast_frames -
309 mac->stats.tx_bcast_frames;
310 tx_ucast += (u64)(stats0[27 - STATS0_START] - (u32)tx_ucast);
/* wrap-safe 64-bit accumulate of a 32-bit hardware counter */
312 #define RMON_UPDATE(mac, name, hw_stat) \
313 mac->stats.name += (u64)((hw_stat) - (u32)(mac->stats.name))
315 RMON_UPDATE(mac, rx_octets, stats0[4 - STATS0_START]);
/* rx_frames temporarily accumulates ucast+mcast+bcast deltas; it is
 * overwritten with the reconstructed total below */
316 RMON_UPDATE(mac, rx_frames, stats0[6 - STATS0_START]);
317 RMON_UPDATE(mac, rx_frames, stats0[7 - STATS0_START]);
318 RMON_UPDATE(mac, rx_frames, stats0[8 - STATS0_START]);
319 RMON_UPDATE(mac, rx_mcast_frames, stats0[7 - STATS0_START]);
320 RMON_UPDATE(mac, rx_bcast_frames, stats0[8 - STATS0_START]);
321 RMON_UPDATE(mac, rx_fcs_errs, stats0[9 - STATS0_START]);
322 RMON_UPDATE(mac, rx_pause, stats0[2 - STATS0_START]);
323 RMON_UPDATE(mac, rx_jabber, stats0[16 - STATS0_START]);
324 RMON_UPDATE(mac, rx_short, stats0[11 - STATS0_START]);
325 RMON_UPDATE(mac, rx_symbol_errs, stats0[1 - STATS0_START]);
326 RMON_UPDATE(mac, rx_too_long, stats0[15 - STATS0_START]);
328 RMON_UPDATE(mac, rx_frames_64, stats0[17 - STATS0_START]);
329 RMON_UPDATE(mac, rx_frames_65_127, stats0[18 - STATS0_START]);
330 RMON_UPDATE(mac, rx_frames_128_255, stats0[19 - STATS0_START]);
331 RMON_UPDATE(mac, rx_frames_256_511, stats0[20 - STATS0_START]);
332 RMON_UPDATE(mac, rx_frames_512_1023, stats0[21 - STATS0_START]);
333 RMON_UPDATE(mac, rx_frames_1024_1518, stats0[22 - STATS0_START]);
334 RMON_UPDATE(mac, rx_frames_1519_max, stats0[23 - STATS0_START]);
336 RMON_UPDATE(mac, tx_octets, stats0[26 - STATS0_START]);
337 RMON_UPDATE(mac, tx_frames, stats0[27 - STATS0_START]);
338 RMON_UPDATE(mac, tx_frames, stats0[28 - STATS0_START]);
339 RMON_UPDATE(mac, tx_frames, stats0[29 - STATS0_START]);
340 RMON_UPDATE(mac, tx_mcast_frames, stats0[28 - STATS0_START]);
341 RMON_UPDATE(mac, tx_bcast_frames, stats0[29 - STATS0_START]);
342 RMON_UPDATE(mac, tx_pause, stats0[25 - STATS0_START]);
/* no hardware underrun counter on this MAC; keep the accumulator as-is */
344 RMON_UPDATE(mac, tx_underrun, 0);
346 RMON_UPDATE(mac, tx_frames_64, stats1[36 - STATS1_START]);
347 RMON_UPDATE(mac, tx_frames_65_127, stats1[37 - STATS1_START]);
348 RMON_UPDATE(mac, tx_frames_128_255, stats1[38 - STATS1_START]);
349 RMON_UPDATE(mac, tx_frames_256_511, stats1[39 - STATS1_START]);
350 RMON_UPDATE(mac, tx_frames_512_1023, stats1[40 - STATS1_START]);
351 RMON_UPDATE(mac, tx_frames_1024_1518, stats1[41 - STATS1_START]);
352 RMON_UPDATE(mac, tx_frames_1519_max, stats1[42 - STATS1_START]);
/* replace the mixed accumulation above with the reconstructed totals */
356 mac->stats.rx_frames = rx_ucast + mac->stats.rx_mcast_frames +
357 mac->stats.rx_bcast_frames;
358 mac->stats.tx_frames = tx_ucast + mac->stats.tx_mcast_frames +
359 mac->stats.tx_bcast_frames;
360 out: return &mac->stats;