/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
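/*
 * Vendor-defined management classes occupy the 0x09-0x0f range per the
 * IB spec; these two are the classes this driver passes through to the
 * firmware via MAD_IFC (see ib_process_mad() below).
 */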
enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};
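/*
 * Execute the MAD_IFC firmware command.  The input mailbox carries the
 * 256-byte MAD, optionally followed by 256 bytes of extended work
 * completion info when the MAD arrived from the wire.  The op_modifier
 * bits set below (0x1 = skip M_Key check, 0x2 = skip B_Key check,
 * 0x4 = extended info valid) follow the mlx4 firmware interface as
 * this driver uses it.
 */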
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	if (in_wc) {
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;
		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
			   in_modifier, op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}
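/*
 * Cache an address handle pointing at the subnet manager for this
 * port, so forward_trap() can resend traps to the SM.  The cached AH
 * is protected by dev->sm_lock.
 */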
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock(&dev->sm_lock);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock(&dev->sm_lock);
}
/*
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_event event;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
			struct ib_port_info *pinfo =
				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
			u16 lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(to_mdev(ibdev), port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			event.device           = ibdev;
			event.element.port_num = port_num;

			if (pinfo->clientrereg_resv_subnetto & 0x80) {
				event.event = IB_EVENT_CLIENT_REREGISTER;
				ib_dispatch_event(&event);
			}

			if (prev_lid != lid) {
				event.event = IB_EVENT_LID_CHANGE;
				ib_dispatch_event(&event);
			}
		}

		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
			event.device           = ibdev;
			event.event            = IB_EVENT_PKEY_CHANGE;
			event.element.port_num = port_num;
			ib_dispatch_event(&event);
		}
	}
}
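/*
 * The node description can be changed at runtime (e.g. through sysfs),
 * but the firmware keeps answering NodeDesc queries with its original
 * string, so patch GetResp(NodeDesc) MADs with the current value.
 */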
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock(&to_mdev(dev)->sm_lock);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock(&to_mdev(dev)->sm_lock);
	}
}
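/*
 * Resend a trap to the subnet manager using the cached SM address
 * handle.  SMP-class traps go out on QP0, everything else on QP1,
 * hence the qpn computation from mgmt_class below.
 */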
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock(&dev->sm_lock);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock(&dev->sm_lock);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}
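/*
 * Vendor-specific SMP attribute IDs live at the top of the attribute
 * space (0xff00-0xffff per the IB spec); IB_SMP_ATTR_VENDOR_MASK is
 * assumed to cover that range, so a masked compare spots them.
 */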
static int is_vendor_id(__be16 attr_id)
{
	return (attr_id & IB_SMP_ATTR_VENDOR_MASK) == IB_SMP_ATTR_VENDOR_MASK;
}

static int supported_vendor_id(__be16 attr_id)
{
	/*
	 * Placeholder (original body elided): treat no vendor-specific
	 * attribute as supported, so ib_process_mad() drops them all.
	 */
	return 0;
}
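/*
 * MAD handling for IB link-layer ports.  The return flags tell the MAD
 * layer what happened: IB_MAD_RESULT_SUCCESS alone means "processed,
 * no reply"; ORed with IB_MAD_RESULT_REPLY it means out_mad holds a
 * response to send; ORed with IB_MAD_RESULT_CONSUMED it means the MAD
 * should not be processed further.
 */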
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    (is_vendor_id(in_mad->mad_hdr.attr_id) &&
		     !supported_vendor_id(in_mad->mad_hdr.attr_id)))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
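/*
 * The PMA PortCounters attribute carries 32-bit counters while the
 * hardware keeps 64-bit ones, so report only the low 32 bits.
 */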
static __be32 be64_to_be32(__be64 b64)
{
	return cpu_to_be32(be64_to_cpu(b64) & 0xffffffff);
}
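/*
 * Fill a PortCounters reply from the QUERY_IF_STAT mailbox.  The
 * attribute data starts 40 bytes into the class-specific area of the
 * MAD, and the +24/+28/+32/+36 offsets land on PortXmitData,
 * PortRcvData, PortXmitPkts and PortRcvPkts; the +8 store in the
 * extended variant overlays the receive-error counters.
 */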
static void edit_counters(struct mlx4_counters *cnt, void *data)
{
	*(__be32 *)(data + 40 + 24) = be64_to_be32(cnt->tx_bytes);
	*(__be32 *)(data + 40 + 28) = be64_to_be32(cnt->rx_bytes);
	*(__be32 *)(data + 40 + 32) = be64_to_be32(cnt->tx_frames);
	*(__be32 *)(data + 40 + 36) = be64_to_be32(cnt->rx_frames);
}

static void edit_ext_counters(struct mlx4_counters_ext *cnt, void *data)
{
	*(__be32 *)(data + 40 + 24) = be64_to_be32(cnt->tx_uni_bytes);
	*(__be32 *)(data + 40 + 28) = be64_to_be32(cnt->rx_uni_bytes);
	*(__be32 *)(data + 40 + 32) = be64_to_be32(cnt->tx_uni_frames);
	*(__be32 *)(data + 40 + 36) = be64_to_be32(cnt->rx_uni_frames);
	*(__be32 *)(data + 40 + 8)  = be64_to_be32(cnt->rx_err_frames);
}
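/*
 * Ethernet (RDMAoE/IBoE) ports have no subnet management agent, so the
 * only MADs answered here are performance-management queries, which
 * are synthesized from the hardware counter set read with
 * MLX4_CMD_QUERY_IF_STAT.
 */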
static int rdmaoe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			      struct ib_wc *in_wc, struct ib_grh *in_grh,
			      struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	int mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = be32_to_cpu(((struct mlx4_counters *)mailbox->buf)->counter_mode) & 0xf;
		switch (mode) {
		case 0:
			edit_counters(mailbox->buf, out_mad->data);
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		case 1:
			edit_ext_counters(mailbox->buf, out_mad->data);
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return rdmaoe_process_mad(ibdev, mad_flags, port_num, in_wc,
					  in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}
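/*
 * Register one send-only MAD agent per (port, QP0/QP1) pair on IB
 * link-layer ports; these agents exist so forward_trap() and
 * update_sm_ah() have a QP to send from.  Ethernet ports get none.
 */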
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}