/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
37 #include <linux/if_vlan.h>
39 #include <linux/mlx4/device.h>
40 #include <linux/mlx4/cmd.h>
42 #if 0 // moved to port.c
43 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
44 u64 mac, u64 clear, u8 mode)
46 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
47 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
51 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, u32 *vlans)
53 struct mlx4_cmd_mailbox *mailbox;
54 struct mlx4_set_vlan_fltr_mbox *filter;
58 mailbox = mlx4_alloc_cmd_mailbox(dev);
60 return PTR_ERR(mailbox);
62 filter = mailbox->buf;
63 memset(filter, 0, sizeof *filter);
65 for (i = 0, j = VLAN_FLTR_SIZE - 1; i < VLAN_FLTR_SIZE;
67 filter->entry[j] = cpu_to_be32(vlans[i]);
68 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
69 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
70 mlx4_free_cmd_mailbox(dev, mailbox);
75 #if 0 //moved to port.c - shahark
76 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
77 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
79 struct mlx4_cmd_mailbox *mailbox;
80 struct mlx4_set_port_general_context *context;
84 mailbox = mlx4_alloc_cmd_mailbox(dev);
86 return PTR_ERR(mailbox);
87 context = mailbox->buf;
88 memset(context, 0, sizeof *context);
90 context->flags = SET_PORT_GEN_ALL_VALID;
91 context->mtu = cpu_to_be16(mtu);
92 context->pptx = (pptx * (!pfctx)) << 7;
93 context->pfctx = pfctx;
94 context->pprx = (pprx * (!pfcrx)) << 7;
95 context->pfcrx = pfcrx;
97 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
98 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
99 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
101 mlx4_free_cmd_mailbox(dev, mailbox);
104 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
108 printf("%s %s:%d\n", __func__, __FILE__, __LINE__);
112 struct mlx4_cmd_mailbox *mailbox;
113 struct mlx4_set_port_rqp_calc_context *context;
117 mailbox = mlx4_alloc_cmd_mailbox(dev);
119 return PTR_ERR(mailbox);
120 context = mailbox->buf;
121 memset(context, 0, sizeof *context);
123 context->base_qpn = cpu_to_be32(base_qpn);
124 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
126 context->mcast = cpu_to_be32((dev->caps.mc_promisc_mode <<
127 SET_PORT_PROMISC_MODE_SHIFT) | base_qpn);
129 context->intra_no_vlan = 0;
130 context->no_vlan = MLX4_NO_VLAN_IDX;
131 context->intra_vlan_miss = 0;
132 context->vlan_miss = MLX4_VLAN_MISS_IDX;
134 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
135 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
136 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
138 mlx4_free_cmd_mailbox(dev, mailbox);
143 int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
145 struct mlx4_en_query_port_context *qport_context;
146 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
147 struct mlx4_en_port_state *state = &priv->port_state;
148 struct mlx4_cmd_mailbox *mailbox;
151 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
153 return PTR_ERR(mailbox);
154 memset(mailbox->buf, 0, sizeof(*qport_context));
155 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
156 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
159 qport_context = mailbox->buf;
161 /* This command is always accessed from Ethtool context
162 * already synchronized, no need in locking */
163 state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
164 switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
165 case MLX4_EN_1G_SPEED:
166 state->link_speed = 1000;
168 case MLX4_EN_10G_SPEED_XAUI:
169 case MLX4_EN_10G_SPEED_XFI:
170 state->link_speed = 10000;
172 case MLX4_EN_40G_SPEED:
173 state->link_speed = 40000;
176 state->link_speed = -1;
179 state->transciver = qport_context->transceiver;
180 if (be32_to_cpu(qport_context->transceiver_code_hi) & 0x400)
181 state->transciver = 0x80;
184 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
189 static int read_iboe_counters(struct mlx4_dev *dev, int index, u64 counters[])
191 struct mlx4_cmd_mailbox *mailbox;
194 struct mlx4_counters_ext *ext;
195 struct mlx4_counters *reg;
197 mailbox = mlx4_alloc_cmd_mailbox(dev);
201 err = mlx4_cmd_box(dev, 0, mailbox->dma, index, 0,
202 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_WRAPPED);
206 mode = be32_to_cpu(((struct mlx4_counters *)mailbox->buf)->counter_mode) & 0xf;
210 counters[0] = be64_to_cpu(reg->rx_frames);
211 counters[1] = be64_to_cpu(reg->tx_frames);
212 counters[2] = be64_to_cpu(reg->rx_bytes);
213 counters[3] = be64_to_cpu(reg->tx_bytes);
217 counters[0] = be64_to_cpu(ext->rx_uni_frames);
218 counters[1] = be64_to_cpu(ext->tx_uni_frames);
219 counters[2] = be64_to_cpu(ext->rx_uni_bytes);
220 counters[3] = be64_to_cpu(ext->tx_uni_bytes);
227 mlx4_free_cmd_mailbox(dev, mailbox);
232 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
234 struct mlx4_en_stat_out_mbox *mlx4_en_stats;
235 struct net_device *dev;
236 struct mlx4_en_priv *priv;
237 struct mlx4_cmd_mailbox *mailbox;
238 u64 in_mod = reset << 8 | port;
239 unsigned long oerror;
240 unsigned long ierror;
246 dev = mdev->pndev[port];
247 priv = netdev_priv(dev);
248 memset(counters, 0, sizeof counters);
250 counter = mlx4_get_iboe_counter(priv->mdev->dev, port);
252 err = read_iboe_counters(priv->mdev->dev, counter, counters);
255 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
257 return PTR_ERR(mailbox);
258 memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
259 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
260 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
264 mlx4_en_stats = mailbox->buf;
266 spin_lock(&priv->stats_lock);
269 dev->if_ipackets = counters[0];
270 dev->if_ibytes = counters[2];
271 for (i = 0; i < priv->rx_ring_num; i++) {
272 dev->if_ipackets += priv->rx_ring[i].packets;
273 dev->if_ibytes += priv->rx_ring[i].bytes;
274 ierror += priv->rx_ring[i].errors;
276 dev->if_opackets = counters[1];
277 dev->if_obytes = counters[3];
278 for (i = 0; i <= priv->tx_ring_num; i++) {
279 dev->if_opackets += priv->tx_ring[i].packets;
280 dev->if_obytes += priv->tx_ring[i].bytes;
281 oerror += priv->tx_ring[i].errors;
284 dev->if_ierrors = be32_to_cpu(mlx4_en_stats->RDROP) + ierror;
285 dev->if_oerrors = be32_to_cpu(mlx4_en_stats->TDROP) + oerror;
286 dev->if_imcasts = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
287 be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
288 be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
289 be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
290 be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
291 be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
292 be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
293 be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
294 be64_to_cpu(mlx4_en_stats->MCAST_novlan);
295 dev->if_omcasts = be64_to_cpu(mlx4_en_stats->TMCAST_prio_0) +
296 be64_to_cpu(mlx4_en_stats->TMCAST_prio_1) +
297 be64_to_cpu(mlx4_en_stats->TMCAST_prio_2) +
298 be64_to_cpu(mlx4_en_stats->TMCAST_prio_3) +
299 be64_to_cpu(mlx4_en_stats->TMCAST_prio_4) +
300 be64_to_cpu(mlx4_en_stats->TMCAST_prio_5) +
301 be64_to_cpu(mlx4_en_stats->TMCAST_prio_6) +
302 be64_to_cpu(mlx4_en_stats->TMCAST_prio_7) +
303 be64_to_cpu(mlx4_en_stats->TMCAST_novlan);
304 dev->if_collisions = 0;
306 priv->pkstats.broadcast =
307 be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
308 be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
309 be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
310 be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
311 be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
312 be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
313 be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
314 be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
315 be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
316 priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
317 priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
318 priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
319 priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
320 priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
321 priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
322 priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
323 priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
324 priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
325 priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
326 priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
327 priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
328 priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
329 priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
330 priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
331 priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
332 spin_unlock(&priv->stats_lock);
335 mlx4_free_cmd_mailbox(mdev->dev, mailbox);