2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
4 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
5 * Copyright (c) 2005 Intel Corporation. All rights reserved.
6 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
7 * Copyright (c) 2009 HNR Consulting. All rights reserved.
8 * Copyright (c) 2014 Intel Corporation. All rights reserved.
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #define LINUXKPI_PARAM_PREFIX ibcore_
43 #define KBUILD_MODNAME "ibcore"
45 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
47 #include <linux/dma-mapping.h>
48 #include <linux/slab.h>
49 #include <linux/module.h>
50 #include <rdma/ib_cache.h>
57 #include "core_priv.h"
59 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
60 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
62 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
63 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
64 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
65 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
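/*
 * ib_mad_port_list tracks every port that has its MAD QPs set up and is
 * protected by ib_mad_port_list_lock.  ib_mad_client_id seeds the high
 * 32 bits of each registered agent's transaction IDs (agent.hi_tid),
 * which find_mad_agent() later uses to route response MADs back to the
 * agent that sent the matching request.
 */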
67 static struct list_head ib_mad_port_list;
68 static u32 ib_mad_client_id = 0;
71 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
73 /* Forward declarations */
74 static int method_in_use(struct ib_mad_mgmt_method_table **method,
75 struct ib_mad_reg_req *mad_reg_req);
76 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
77 static struct ib_mad_agent_private *find_mad_agent(
78 struct ib_mad_port_private *port_priv,
79 const struct ib_mad_hdr *mad);
80 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
81 struct ib_mad_private *mad);
82 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
83 static void timeout_sends(struct work_struct *work);
84 static void local_completions(struct work_struct *work);
85 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv,
88 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
89 struct ib_mad_agent_private *agent_priv);
90 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
92 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
95 * Returns an ib_mad_port_private structure or NULL for a device/port
96 * Assumes ib_mad_port_list_lock is being held
98 static inline struct ib_mad_port_private *
99 __ib_get_mad_port(struct ib_device *device, int port_num)
101 struct ib_mad_port_private *entry;
103 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
104 if (entry->device == device && entry->port_num == port_num)
111 * Wrapper function to return an ib_mad_port_private structure or NULL
114 static inline struct ib_mad_port_private *
115 ib_get_mad_port(struct ib_device *device, int port_num)
117 struct ib_mad_port_private *entry;
120 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
121 entry = __ib_get_mad_port(device, port_num);
122 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
127 static inline u8 convert_mgmt_class(u8 mgmt_class)
129 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
130 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
134 static int get_spl_qp_index(enum ib_qp_type qp_type)
147 static int vendor_class_index(u8 mgmt_class)
149 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
152 static int is_vendor_class(u8 mgmt_class)
154 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
155 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
160 static int is_vendor_oui(char *oui)
162 if (oui[0] || oui[1] || oui[2])
167 static int is_vendor_method_in_use(
168 struct ib_mad_mgmt_vendor_class *vendor_class,
169 struct ib_mad_reg_req *mad_reg_req)
171 struct ib_mad_mgmt_method_table *method;
174 for (i = 0; i < MAX_MGMT_OUI; i++) {
175 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
176 method = vendor_class->method_table[i];
178 if (method_in_use(&method, mad_reg_req))
188 int ib_response_mad(const struct ib_mad_hdr *hdr)
190 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
191 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
192 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
193 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
195 EXPORT_SYMBOL(ib_response_mad);
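/*
 * ib_response_mad() is the predicate find_mad_agent() uses to choose a
 * routing strategy: responses (R bit set in the method, TrapRepress, or
 * a BM MAD with the response bit in attr_mod) are matched to the sending
 * agent via hi_tid, while unsolicited MADs are routed by class, method
 * and, for vendor classes, OUI.
 */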
198 * ib_register_mad_agent - Register to send/receive MADs
200 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
202 enum ib_qp_type qp_type,
203 struct ib_mad_reg_req *mad_reg_req,
205 ib_mad_send_handler send_handler,
206 ib_mad_recv_handler recv_handler,
208 u32 registration_flags)
210 struct ib_mad_port_private *port_priv;
211 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
212 struct ib_mad_agent_private *mad_agent_priv;
213 struct ib_mad_reg_req *reg_req = NULL;
214 struct ib_mad_mgmt_class_table *class;
215 struct ib_mad_mgmt_vendor_class_table *vendor;
216 struct ib_mad_mgmt_vendor_class *vendor_class;
217 struct ib_mad_mgmt_method_table *method;
220 u8 mgmt_class, vclass;
222 /* Validate parameters */
223 qpn = get_spl_qp_index(qp_type);
225 dev_notice(&device->dev,
226 "ib_register_mad_agent: invalid QP Type %d\n",
231 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
232 dev_notice(&device->dev,
233 "ib_register_mad_agent: invalid RMPP Version %u\n",
238 /* Validate MAD registration request if supplied */
240 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
241 dev_notice(&device->dev,
242 "ib_register_mad_agent: invalid Class Version %u\n",
243 mad_reg_req->mgmt_class_version);
247 dev_notice(&device->dev,
248 "ib_register_mad_agent: no recv_handler\n");
251 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
253 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
254 * one in this range currently allowed
256 if (mad_reg_req->mgmt_class !=
257 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
258 dev_notice(&device->dev,
259 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
260 mad_reg_req->mgmt_class);
263 } else if (mad_reg_req->mgmt_class == 0) {
265 * Class 0 is reserved in IBA and is used for
266 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
268 dev_notice(&device->dev,
269 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
271 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
273 * If class is in "new" vendor range,
274 * ensure supplied OUI is not zero
276 if (!is_vendor_oui(mad_reg_req->oui)) {
277 dev_notice(&device->dev,
278 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
279 mad_reg_req->mgmt_class);
283 /* Make sure class supplied is consistent with RMPP */
284 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
286 dev_notice(&device->dev,
287 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
288 mad_reg_req->mgmt_class);
293 /* Make sure class supplied is consistent with QP type */
294 if (qp_type == IB_QPT_SMI) {
295 if ((mad_reg_req->mgmt_class !=
296 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
297 (mad_reg_req->mgmt_class !=
298 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
299 dev_notice(&device->dev,
300 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
301 mad_reg_req->mgmt_class);
305 if ((mad_reg_req->mgmt_class ==
306 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
307 (mad_reg_req->mgmt_class ==
308 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
309 dev_notice(&device->dev,
310 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
311 mad_reg_req->mgmt_class);
316 /* No registration request supplied */
319 if (registration_flags & IB_MAD_USER_RMPP)
323 /* Validate device and port */
324 port_priv = ib_get_mad_port(device, port_num);
326 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
327 ret = ERR_PTR(-ENODEV);
331 /* Verify the QP requested is supported. For example, Ethernet devices
332 * will not have QP0 */
333 if (!port_priv->qp_info[qpn].qp) {
334 dev_notice(&device->dev,
335 "ib_register_mad_agent: QP %d not supported\n", qpn);
336 ret = ERR_PTR(-EPROTONOSUPPORT);
340 /* Allocate structures */
341 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
342 if (!mad_agent_priv) {
343 ret = ERR_PTR(-ENOMEM);
348 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
350 ret = ERR_PTR(-ENOMEM);
355 /* Now, fill in the various structures */
356 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
357 mad_agent_priv->reg_req = reg_req;
358 mad_agent_priv->agent.rmpp_version = rmpp_version;
359 mad_agent_priv->agent.device = device;
360 mad_agent_priv->agent.recv_handler = recv_handler;
361 mad_agent_priv->agent.send_handler = send_handler;
362 mad_agent_priv->agent.context = context;
363 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
364 mad_agent_priv->agent.port_num = port_num;
365 mad_agent_priv->agent.flags = registration_flags;
366 spin_lock_init(&mad_agent_priv->lock);
367 INIT_LIST_HEAD(&mad_agent_priv->send_list);
368 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
369 INIT_LIST_HEAD(&mad_agent_priv->done_list);
370 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
371 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
372 INIT_LIST_HEAD(&mad_agent_priv->local_list);
373 INIT_WORK(&mad_agent_priv->local_work, local_completions);
374 atomic_set(&mad_agent_priv->refcount, 1);
375 init_completion(&mad_agent_priv->comp);
377 spin_lock_irqsave(&port_priv->reg_lock, flags);
378 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
381 * Make sure MAD registration (if supplied)
382 * does not overlap with any existing ones
385 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
386 if (!is_vendor_class(mgmt_class)) {
387 class = port_priv->version[mad_reg_req->
388 mgmt_class_version].class;
390 method = class->method_table[mgmt_class];
392 if (method_in_use(&method,
397 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
400 /* "New" vendor class range */
401 vendor = port_priv->version[mad_reg_req->
402 mgmt_class_version].vendor;
404 vclass = vendor_class_index(mgmt_class);
405 vendor_class = vendor->vendor_class[vclass];
407 if (is_vendor_method_in_use(
413 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
421 /* Add mad agent into port's agent list */
422 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
423 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
425 return &mad_agent_priv->agent;
428 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
431 kfree(mad_agent_priv);
435 EXPORT_SYMBOL(ib_register_mad_agent);
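/*
 * Illustrative sketch (not part of this file): a client that wants to
 * receive unsolicited Device Management Get requests on QP1 might
 * register roughly as follows; the handler names and context are
 * hypothetical and error handling is abbreviated.
 *
 *	struct ib_mad_reg_req req = { 0 };
 *
 *	req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */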
437 static inline int is_snooping_sends(int mad_snoop_flags)
439 return (mad_snoop_flags &
440 (/*IB_MAD_SNOOP_POSTED_SENDS |
441 IB_MAD_SNOOP_RMPP_SENDS |*/
442 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
443 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
446 static inline int is_snooping_recvs(int mad_snoop_flags)
448 return (mad_snoop_flags &
449 (IB_MAD_SNOOP_RECVS /*|
450 IB_MAD_SNOOP_RMPP_RECVS*/));
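/*
 * register_snoop_agent() claims a free slot in qp_info->snoop_table,
 * growing the table by one entry (krealloc) when it is full, and returns
 * the slot index or a negative errno.  snoop_send()/snoop_recv() later
 * walk the table under snoop_lock and hand copies of matching traffic to
 * each registered snoop agent.
 */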
453 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
454 struct ib_mad_snoop_private *mad_snoop_priv)
456 struct ib_mad_snoop_private **new_snoop_table;
460 spin_lock_irqsave(&qp_info->snoop_lock, flags);
461 /* Check for empty slot in array. */
462 for (i = 0; i < qp_info->snoop_table_size; i++)
463 if (!qp_info->snoop_table[i])
466 if (i == qp_info->snoop_table_size) {
468 new_snoop_table = krealloc(qp_info->snoop_table,
469 sizeof mad_snoop_priv *
470 (qp_info->snoop_table_size + 1),
472 if (!new_snoop_table) {
477 qp_info->snoop_table = new_snoop_table;
478 qp_info->snoop_table_size++;
480 qp_info->snoop_table[i] = mad_snoop_priv;
481 atomic_inc(&qp_info->snoop_count);
483 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
487 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
489 enum ib_qp_type qp_type,
491 ib_mad_snoop_handler snoop_handler,
492 ib_mad_recv_handler recv_handler,
495 struct ib_mad_port_private *port_priv;
496 struct ib_mad_agent *ret;
497 struct ib_mad_snoop_private *mad_snoop_priv;
500 /* Validate parameters */
501 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
502 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
503 ret = ERR_PTR(-EINVAL);
506 qpn = get_spl_qp_index(qp_type);
508 ret = ERR_PTR(-EINVAL);
511 port_priv = ib_get_mad_port(device, port_num);
513 ret = ERR_PTR(-ENODEV);
516 /* Allocate structures */
517 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
518 if (!mad_snoop_priv) {
519 ret = ERR_PTR(-ENOMEM);
523 /* Now, fill in the various structures */
524 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
525 mad_snoop_priv->agent.device = device;
526 mad_snoop_priv->agent.recv_handler = recv_handler;
527 mad_snoop_priv->agent.snoop_handler = snoop_handler;
528 mad_snoop_priv->agent.context = context;
529 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
530 mad_snoop_priv->agent.port_num = port_num;
531 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
532 init_completion(&mad_snoop_priv->comp);
533 mad_snoop_priv->snoop_index = register_snoop_agent(
534 &port_priv->qp_info[qpn],
536 if (mad_snoop_priv->snoop_index < 0) {
537 ret = ERR_PTR(mad_snoop_priv->snoop_index);
541 atomic_set(&mad_snoop_priv->refcount, 1);
542 return &mad_snoop_priv->agent;
545 kfree(mad_snoop_priv);
549 EXPORT_SYMBOL(ib_register_mad_snoop);
551 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
553 if (atomic_dec_and_test(&mad_agent_priv->refcount))
554 complete(&mad_agent_priv->comp);
557 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
559 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
560 complete(&mad_snoop_priv->comp);
563 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
565 struct ib_mad_port_private *port_priv;
568 /* Note that we could still be handling received MADs */
571 * Canceling all sends results in dropping received response
572 * MADs, preventing us from queuing additional work
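 * The teardown order below matters: cancel outstanding sends, remove the
 * class/method registration under reg_lock so no new receives are
 * dispatched to this agent, flush the port workqueue and RMPP receives,
 * then drop the initial reference and wait on ->comp for the last user
 * to finish before freeing the agent.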
574 cancel_mads(mad_agent_priv);
575 port_priv = mad_agent_priv->qp_info->port_priv;
576 cancel_delayed_work_sync(&mad_agent_priv->timed_work);
578 spin_lock_irqsave(&port_priv->reg_lock, flags);
579 remove_mad_reg_req(mad_agent_priv);
580 list_del(&mad_agent_priv->agent_list);
581 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
583 flush_workqueue(port_priv->wq);
584 ib_cancel_rmpp_recvs(mad_agent_priv);
586 deref_mad_agent(mad_agent_priv);
587 wait_for_completion(&mad_agent_priv->comp);
589 kfree(mad_agent_priv->reg_req);
590 kfree(mad_agent_priv);
593 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
595 struct ib_mad_qp_info *qp_info;
598 qp_info = mad_snoop_priv->qp_info;
599 spin_lock_irqsave(&qp_info->snoop_lock, flags);
600 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
601 atomic_dec(&qp_info->snoop_count);
602 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
604 deref_snoop_agent(mad_snoop_priv);
605 wait_for_completion(&mad_snoop_priv->comp);
607 kfree(mad_snoop_priv);
611 * ib_unregister_mad_agent - Unregisters a client from using MAD services
613 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
615 struct ib_mad_agent_private *mad_agent_priv;
616 struct ib_mad_snoop_private *mad_snoop_priv;
618 /* If the TID is zero, the agent can only snoop. */
619 if (mad_agent->hi_tid) {
620 mad_agent_priv = container_of(mad_agent,
621 struct ib_mad_agent_private,
623 unregister_mad_agent(mad_agent_priv);
625 mad_snoop_priv = container_of(mad_agent,
626 struct ib_mad_snoop_private,
628 unregister_mad_snoop(mad_snoop_priv);
632 EXPORT_SYMBOL(ib_unregister_mad_agent);
634 static void dequeue_mad(struct ib_mad_list_head *mad_list)
636 struct ib_mad_queue *mad_queue;
639 BUG_ON(!mad_list->mad_queue);
640 mad_queue = mad_list->mad_queue;
641 spin_lock_irqsave(&mad_queue->lock, flags);
642 list_del(&mad_list->list);
644 spin_unlock_irqrestore(&mad_queue->lock, flags);
647 static void snoop_send(struct ib_mad_qp_info *qp_info,
648 struct ib_mad_send_buf *send_buf,
649 struct ib_mad_send_wc *mad_send_wc,
652 struct ib_mad_snoop_private *mad_snoop_priv;
656 spin_lock_irqsave(&qp_info->snoop_lock, flags);
657 for (i = 0; i < qp_info->snoop_table_size; i++) {
658 mad_snoop_priv = qp_info->snoop_table[i];
659 if (!mad_snoop_priv ||
660 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
663 atomic_inc(&mad_snoop_priv->refcount);
664 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
665 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
666 send_buf, mad_send_wc);
667 deref_snoop_agent(mad_snoop_priv);
668 spin_lock_irqsave(&qp_info->snoop_lock, flags);
670 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
673 static void snoop_recv(struct ib_mad_qp_info *qp_info,
674 struct ib_mad_recv_wc *mad_recv_wc,
677 struct ib_mad_snoop_private *mad_snoop_priv;
681 spin_lock_irqsave(&qp_info->snoop_lock, flags);
682 for (i = 0; i < qp_info->snoop_table_size; i++) {
683 mad_snoop_priv = qp_info->snoop_table[i];
684 if (!mad_snoop_priv ||
685 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
688 atomic_inc(&mad_snoop_priv->refcount);
689 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
690 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
692 deref_snoop_agent(mad_snoop_priv);
693 spin_lock_irqsave(&qp_info->snoop_lock, flags);
695 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
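/*
 * build_smp_wc() fabricates a receive work completion for a directed
 * route SMP that is processed locally (see handle_outgoing_dr_smp()), so
 * the reply can be delivered through the normal receive path without
 * going out on the wire.
 */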
698 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
699 u16 pkey_index, u8 port_num, struct ib_wc *wc)
701 memset(wc, 0, sizeof *wc);
703 wc->status = IB_WC_SUCCESS;
704 wc->opcode = IB_WC_RECV;
705 wc->pkey_index = pkey_index;
706 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
711 wc->dlid_path_bits = 0;
712 wc->port_num = port_num;
715 static size_t mad_priv_size(const struct ib_mad_private *mp)
717 return sizeof(struct ib_mad_private) + mp->mad_size;
720 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
722 size_t size = sizeof(struct ib_mad_private) + mad_size;
723 struct ib_mad_private *ret = kzalloc(size, flags);
726 ret->mad_size = mad_size;
731 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
733 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
736 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
738 return sizeof(struct ib_grh) + mp->mad_size;
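/*
 * An ib_mad_private holds its bookkeeping header, the received GRH and a
 * variably sized MAD buffer of mad_size bytes; only the GRH plus MAD
 * region is DMA mapped for receives, which is what mad_priv_dma_size()
 * covers.
 */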
742 * Return 0 if SMP is to be sent
743 * Return 1 if SMP was consumed locally (whether or not solicited)
744 * Return < 0 if error
746 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
747 struct ib_mad_send_wr_private *mad_send_wr)
750 struct ib_smp *smp = mad_send_wr->send_buf.mad;
751 struct opa_smp *opa_smp = (struct opa_smp *)smp;
753 struct ib_mad_local_private *local;
754 struct ib_mad_private *mad_priv;
755 struct ib_mad_port_private *port_priv;
756 struct ib_mad_agent_private *recv_mad_agent = NULL;
757 struct ib_device *device = mad_agent_priv->agent.device;
760 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
761 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
762 u16 out_mad_pkey_index = 0;
764 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
765 mad_agent_priv->qp_info->port_priv->port_num);
767 if (rdma_cap_ib_switch(device) &&
768 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
769 port_num = send_wr->port_num;
771 port_num = mad_agent_priv->agent.port_num;
774 * Directed route handling starts if the initial LID routed part of
775 * a request or the ending LID routed part of a response is empty.
776 * If we are at the start of the LID routed part, don't update the
777 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
779 if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
782 if ((opa_get_smp_direction(opa_smp)
783 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
784 OPA_LID_PERMISSIVE &&
785 opa_smi_handle_dr_smp_send(opa_smp,
786 rdma_cap_ib_switch(device),
787 port_num) == IB_SMI_DISCARD) {
789 dev_err(&device->dev, "OPA Invalid directed route\n");
792 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
793 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
794 opa_drslid & 0xffff0000) {
796 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
800 drslid = (u16)(opa_drslid & 0x0000ffff);
802 /* Check whether to post the send on the QP or process it locally */
803 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
804 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
807 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
809 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
812 dev_err(&device->dev, "Invalid directed route\n");
815 drslid = be16_to_cpu(smp->dr_slid);
817 /* Check whether to post the send on the QP or process it locally */
818 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
819 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
823 local = kmalloc(sizeof *local, GFP_ATOMIC);
826 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
829 local->mad_priv = NULL;
830 local->recv_mad_agent = NULL;
831 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
834 dev_err(&device->dev, "No memory for local response MAD\n");
839 build_smp_wc(mad_agent_priv->agent.qp,
840 send_wr->wr.wr_cqe, drslid,
842 send_wr->port_num, &mad_wc);
844 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
845 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
846 + mad_send_wr->send_buf.data_len
847 + sizeof(struct ib_grh);
850 /* No GRH for DR SMP */
851 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
852 (const struct ib_mad_hdr *)smp, mad_size,
853 (struct ib_mad_hdr *)mad_priv->mad,
854 &mad_size, &out_mad_pkey_index);
857 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
858 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
859 mad_agent_priv->agent.recv_handler) {
860 local->mad_priv = mad_priv;
861 local->recv_mad_agent = mad_agent_priv;
863 * Reference the MAD agent until the receive
864 * side of the local completion is handled
866 atomic_inc(&mad_agent_priv->refcount);
870 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
873 case IB_MAD_RESULT_SUCCESS:
874 /* Treat like an incoming receive MAD */
875 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
876 mad_agent_priv->agent.port_num);
878 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
879 recv_mad_agent = find_mad_agent(port_priv,
880 (const struct ib_mad_hdr *)mad_priv->mad);
882 if (!port_priv || !recv_mad_agent) {
884 * No receiving agent so drop packet and
885 * generate send completion.
890 local->mad_priv = mad_priv;
891 local->recv_mad_agent = recv_mad_agent;
900 local->mad_send_wr = mad_send_wr;
902 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
903 local->return_wc_byte_len = mad_size;
905 /* Reference the MAD agent until the send side of the local completion is handled */
906 atomic_inc(&mad_agent_priv->refcount);
907 /* Queue local completion to local list */
908 spin_lock_irqsave(&mad_agent_priv->lock, flags);
909 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
910 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
911 queue_work(mad_agent_priv->qp_info->port_priv->wq,
912 &mad_agent_priv->local_work);
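/*
 * local_completions() runs from the port workqueue and delivers both the
 * synthesized receive (if any) and the send completion for this locally
 * handled SMP, mirroring what the completion handlers do for MADs that
 * actually reach the wire.
 */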
918 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
922 seg_size = mad_size - hdr_len;
923 if (data_len && seg_size) {
924 pad = seg_size - data_len % seg_size;
925 return pad == seg_size ? 0 : pad;
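/*
 * get_pad_size() rounds the payload up to a whole number of RMPP data
 * segments.  For example (illustrative numbers only): a 256-byte IB MAD
 * with a 56-byte SA header gives seg_size = 200, so data_len = 300 needs
 * pad = 200 - (300 % 200) = 100 to fill out the second segment.
 */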
930 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
932 struct ib_rmpp_segment *s, *t;
934 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
940 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
941 size_t mad_size, gfp_t gfp_mask)
943 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
944 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
945 struct ib_rmpp_segment *seg = NULL;
946 int left, seg_size, pad;
948 send_buf->seg_size = mad_size - send_buf->hdr_len;
949 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
950 seg_size = send_buf->seg_size;
953 /* Allocate data segments. */
954 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
955 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
957 dev_err(&send_buf->mad_agent->device->dev,
958 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
959 sizeof (*seg) + seg_size, gfp_mask);
960 free_send_rmpp_list(send_wr);
963 seg->num = ++send_buf->seg_count;
964 list_add_tail(&seg->list, &send_wr->rmpp_list);
967 /* Zero any padding */
969 memset(seg->data + seg_size - pad, 0, pad);
971 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
973 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
974 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
976 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
977 struct ib_rmpp_segment, list);
978 send_wr->last_ack_seg = send_wr->cur_seg;
982 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
984 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
986 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
988 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
989 u32 remote_qpn, u16 pkey_index,
991 int hdr_len, int data_len,
995 struct ib_mad_agent_private *mad_agent_priv;
996 struct ib_mad_send_wr_private *mad_send_wr;
997 int pad, message_size, ret, size;
1002 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1005 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1007 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1008 mad_size = sizeof(struct opa_mad);
1010 mad_size = sizeof(struct ib_mad);
1012 pad = get_pad_size(hdr_len, data_len, mad_size);
1013 message_size = hdr_len + data_len + pad;
1015 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1016 if (!rmpp_active && message_size > mad_size)
1017 return ERR_PTR(-EINVAL);
1019 if (rmpp_active || message_size > mad_size)
1020 return ERR_PTR(-EINVAL);
1022 size = rmpp_active ? hdr_len : mad_size;
1023 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1025 return ERR_PTR(-ENOMEM);
1027 mad_send_wr = (struct ib_mad_send_wr_private *)((char *)buf + size);
1028 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1029 mad_send_wr->send_buf.mad = buf;
1030 mad_send_wr->send_buf.hdr_len = hdr_len;
1031 mad_send_wr->send_buf.data_len = data_len;
1032 mad_send_wr->pad = pad;
1034 mad_send_wr->mad_agent_priv = mad_agent_priv;
1035 mad_send_wr->sg_list[0].length = hdr_len;
1036 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1038 /* OPA MADs don't have to be the full 2048 bytes */
1039 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1040 data_len < mad_size - hdr_len)
1041 mad_send_wr->sg_list[1].length = data_len;
1043 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1045 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1047 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1049 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1050 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1051 mad_send_wr->send_wr.wr.num_sge = 2;
1052 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1053 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1054 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1055 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1056 mad_send_wr->send_wr.pkey_index = pkey_index;
1059 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1062 return ERR_PTR(ret);
1066 mad_send_wr->send_buf.mad_agent = mad_agent;
1067 atomic_inc(&mad_agent_priv->refcount);
1068 return &mad_send_wr->send_buf;
1070 EXPORT_SYMBOL(ib_create_send_mad);
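/*
 * Illustrative sketch (not from this file) of the send path as a client
 * might drive it; the agent, address handle and handlers are assumed to
 * already exist:
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	(fill in msg->mad here)
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */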
1072 int ib_get_mad_data_offset(u8 mgmt_class)
1074 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1075 return IB_MGMT_SA_HDR;
1076 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1077 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1078 (mgmt_class == IB_MGMT_CLASS_BIS))
1079 return IB_MGMT_DEVICE_HDR;
1080 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1081 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1082 return IB_MGMT_VENDOR_HDR;
1084 return IB_MGMT_MAD_HDR;
1086 EXPORT_SYMBOL(ib_get_mad_data_offset);
1088 int ib_is_mad_class_rmpp(u8 mgmt_class)
1090 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1091 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1092 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1093 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1094 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1095 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1099 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1101 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1103 struct ib_mad_send_wr_private *mad_send_wr;
1104 struct list_head *list;
1106 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1108 list = &mad_send_wr->cur_seg->list;
1110 if (mad_send_wr->cur_seg->num < seg_num) {
1111 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1112 if (mad_send_wr->cur_seg->num == seg_num)
1114 } else if (mad_send_wr->cur_seg->num > seg_num) {
1115 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1116 if (mad_send_wr->cur_seg->num == seg_num)
1119 return mad_send_wr->cur_seg->data;
1121 EXPORT_SYMBOL(ib_get_rmpp_segment);
1123 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1125 if (mad_send_wr->send_buf.seg_count)
1126 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1127 mad_send_wr->seg_num);
1129 return (char *)mad_send_wr->send_buf.mad +
1130 mad_send_wr->send_buf.hdr_len;
1133 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1135 struct ib_mad_agent_private *mad_agent_priv;
1136 struct ib_mad_send_wr_private *mad_send_wr;
1138 mad_agent_priv = container_of(send_buf->mad_agent,
1139 struct ib_mad_agent_private, agent);
1140 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1143 free_send_rmpp_list(mad_send_wr);
1144 kfree(send_buf->mad);
1145 deref_mad_agent(mad_agent_priv);
1147 EXPORT_SYMBOL(ib_free_send_mad);
1149 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1151 struct ib_mad_qp_info *qp_info;
1152 struct list_head *list;
1153 struct ib_send_wr *bad_send_wr;
1154 struct ib_mad_agent *mad_agent;
1156 unsigned long flags;
1159 /* Set WR ID to find mad_send_wr upon completion */
1160 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1161 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1162 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1163 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1165 mad_agent = mad_send_wr->send_buf.mad_agent;
1166 sge = mad_send_wr->sg_list;
1167 sge[0].addr = ib_dma_map_single(mad_agent->device,
1168 mad_send_wr->send_buf.mad,
1171 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1174 mad_send_wr->header_mapping = sge[0].addr;
1176 sge[1].addr = ib_dma_map_single(mad_agent->device,
1177 ib_get_payload(mad_send_wr),
1180 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1181 ib_dma_unmap_single(mad_agent->device,
1182 mad_send_wr->header_mapping,
1183 sge[0].length, DMA_TO_DEVICE);
1186 mad_send_wr->payload_mapping = sge[1].addr;
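/*
 * Flow control on the send queue: if fewer than max_active WRs are
 * outstanding, the request is posted to the QP immediately; otherwise it
 * is parked on overflow_list and posted later as completions free up
 * send queue slots.
 */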
1188 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1189 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1190 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1192 list = &qp_info->send_queue.list;
1195 list = &qp_info->overflow_list;
1199 qp_info->send_queue.count++;
1200 list_add_tail(&mad_send_wr->mad_list.list, list);
1202 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1204 ib_dma_unmap_single(mad_agent->device,
1205 mad_send_wr->header_mapping,
1206 sge[0].length, DMA_TO_DEVICE);
1207 ib_dma_unmap_single(mad_agent->device,
1208 mad_send_wr->payload_mapping,
1209 sge[1].length, DMA_TO_DEVICE);
1215 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1216 * with the registered client
1218 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1219 struct ib_mad_send_buf **bad_send_buf)
1221 struct ib_mad_agent_private *mad_agent_priv;
1222 struct ib_mad_send_buf *next_send_buf;
1223 struct ib_mad_send_wr_private *mad_send_wr;
1224 unsigned long flags;
1227 /* Walk list of send WRs and post each on send list */
1228 for (; send_buf; send_buf = next_send_buf) {
1230 mad_send_wr = container_of(send_buf,
1231 struct ib_mad_send_wr_private,
1233 mad_agent_priv = mad_send_wr->mad_agent_priv;
1235 if (!send_buf->mad_agent->send_handler ||
1236 (send_buf->timeout_ms &&
1237 !send_buf->mad_agent->recv_handler)) {
1242 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1243 if (mad_agent_priv->agent.rmpp_version) {
1250 * Save pointer to next work request to post in case the
1251 * current one completes, and the user modifies the work
1252 * request associated with the completion
1254 next_send_buf = send_buf->next;
1255 mad_send_wr->send_wr.ah = send_buf->ah;
1257 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1258 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1259 ret = handle_outgoing_dr_smp(mad_agent_priv,
1261 if (ret < 0) /* error */
1263 else if (ret == 1) /* locally consumed */
1267 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1268 /* Timeout will be updated after send completes */
1269 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1270 mad_send_wr->max_retries = send_buf->retries;
1271 mad_send_wr->retries_left = send_buf->retries;
1272 send_buf->retries = 0;
1273 /* Reference for work request to QP + response */
1274 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1275 mad_send_wr->status = IB_WC_SUCCESS;
1277 /* Reference MAD agent until send completes */
1278 atomic_inc(&mad_agent_priv->refcount);
1279 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1280 list_add_tail(&mad_send_wr->agent_list,
1281 &mad_agent_priv->send_list);
1282 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1284 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1285 ret = ib_send_rmpp_mad(mad_send_wr);
1286 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1287 ret = ib_send_mad(mad_send_wr);
1289 ret = ib_send_mad(mad_send_wr);
1291 /* Fail send request */
1292 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1293 list_del(&mad_send_wr->agent_list);
1294 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1295 atomic_dec(&mad_agent_priv->refcount);
1302 *bad_send_buf = send_buf;
1305 EXPORT_SYMBOL(ib_post_send_mad);
1308 * ib_free_recv_mad - Returns the data buffers used to receive
1309 * a MAD back to the access layer
1311 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1313 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1314 struct ib_mad_private_header *mad_priv_hdr;
1315 struct ib_mad_private *priv;
1316 struct list_head free_list;
1318 INIT_LIST_HEAD(&free_list);
1319 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1321 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1323 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1325 mad_priv_hdr = container_of(mad_recv_wc,
1326 struct ib_mad_private_header,
1328 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1333 EXPORT_SYMBOL(ib_free_recv_mad);
1335 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1337 ib_mad_send_handler send_handler,
1338 ib_mad_recv_handler recv_handler,
1341 return ERR_PTR(-EINVAL); /* XXX: for now */
1343 EXPORT_SYMBOL(ib_redirect_mad_qp);
1345 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1348 dev_err(&mad_agent->device->dev,
1349 "ib_process_mad_wc() not implemented yet\n");
1352 EXPORT_SYMBOL(ib_process_mad_wc);
1354 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1355 struct ib_mad_reg_req *mad_reg_req)
1359 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1360 if ((*method)->agent[i]) {
1361 pr_err("Method %d already in use\n", i);
1368 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1370 /* Allocate management method table */
1371 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1373 pr_err("No memory for ib_mad_mgmt_method_table\n");
1381 * Check to see if there are any methods still in use
1383 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1387 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1388 if (method->agent[i])
1394 * Check to see if there are any method tables for this class still in use
1396 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1400 for (i = 0; i < MAX_MGMT_CLASS; i++)
1401 if (class->method_table[i])
1406 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1410 for (i = 0; i < MAX_MGMT_OUI; i++)
1411 if (vendor_class->method_table[i])
1416 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1421 for (i = 0; i < MAX_MGMT_OUI; i++)
1422 /* Is there a matching OUI for this vendor class? */
1423 if (!memcmp(vendor_class->oui[i], oui, 3))
1429 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1433 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1434 if (vendor->vendor_class[i])
1440 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1441 struct ib_mad_agent_private *agent)
1445 /* Remove any methods for this mad agent */
1446 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1447 if (method->agent[i] == agent) {
1448 method->agent[i] = NULL;
1453 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1454 struct ib_mad_agent_private *agent_priv,
1457 struct ib_mad_port_private *port_priv;
1458 struct ib_mad_mgmt_class_table **class;
1459 struct ib_mad_mgmt_method_table **method;
1462 port_priv = agent_priv->qp_info->port_priv;
1463 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1465 /* Allocate management class table for "new" class version */
1466 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1468 dev_err(&agent_priv->agent.device->dev,
1469 "No memory for ib_mad_mgmt_class_table\n");
1474 /* Allocate method table for this management class */
1475 method = &(*class)->method_table[mgmt_class];
1476 if ((ret = allocate_method_table(method)))
1479 method = &(*class)->method_table[mgmt_class];
1481 /* Allocate method table for this management class */
1482 if ((ret = allocate_method_table(method)))
1487 /* Now, make sure methods are not already in use */
1488 if (method_in_use(method, mad_reg_req))
1491 /* Finally, add in methods being registered */
1492 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1493 (*method)->agent[i] = agent_priv;
1498 /* Remove any methods for this mad agent */
1499 remove_methods_mad_agent(*method, agent_priv);
1500 /* Now, check to see if there are any methods in use */
1501 if (!check_method_table(*method)) {
1502 /* If not, release management method table */
1515 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1516 struct ib_mad_agent_private *agent_priv)
1518 struct ib_mad_port_private *port_priv;
1519 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1520 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1521 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1522 struct ib_mad_mgmt_method_table **method;
1523 int i, ret = -ENOMEM;
1526 /* "New" vendor (with OUI) class */
1527 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1528 port_priv = agent_priv->qp_info->port_priv;
1529 vendor_table = &port_priv->version[
1530 mad_reg_req->mgmt_class_version].vendor;
1531 if (!*vendor_table) {
1532 /* Allocate mgmt vendor class table for "new" class version */
1533 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1535 dev_err(&agent_priv->agent.device->dev,
1536 "No memory for ib_mad_mgmt_vendor_class_table\n");
1540 *vendor_table = vendor;
1542 if (!(*vendor_table)->vendor_class[vclass]) {
1543 /* Allocate table for this management vendor class */
1544 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1545 if (!vendor_class) {
1546 dev_err(&agent_priv->agent.device->dev,
1547 "No memory for ib_mad_mgmt_vendor_class\n");
1551 (*vendor_table)->vendor_class[vclass] = vendor_class;
1553 for (i = 0; i < MAX_MGMT_OUI; i++) {
1554 /* Is there a matching OUI for this vendor class? */
1555 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1556 mad_reg_req->oui, 3)) {
1557 method = &(*vendor_table)->vendor_class[
1558 vclass]->method_table[i];
1563 for (i = 0; i < MAX_MGMT_OUI; i++) {
1564 /* OUI slot available ? */
1565 if (!is_vendor_oui((*vendor_table)->vendor_class[
1567 method = &(*vendor_table)->vendor_class[
1568 vclass]->method_table[i];
1570 /* Allocate method table for this OUI */
1571 if ((ret = allocate_method_table(method)))
1573 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1574 mad_reg_req->oui, 3);
1578 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1582 /* Now, make sure methods are not already in use */
1583 if (method_in_use(method, mad_reg_req))
1586 /* Finally, add in methods being registered */
1587 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1588 (*method)->agent[i] = agent_priv;
1593 /* Remove any methods for this mad agent */
1594 remove_methods_mad_agent(*method, agent_priv);
1595 /* Now, check to see if there are any methods in use */
1596 if (!check_method_table(*method)) {
1597 /* If not, release management method table */
1604 (*vendor_table)->vendor_class[vclass] = NULL;
1605 kfree(vendor_class);
1609 *vendor_table = NULL;
1616 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1618 struct ib_mad_port_private *port_priv;
1619 struct ib_mad_mgmt_class_table *class;
1620 struct ib_mad_mgmt_method_table *method;
1621 struct ib_mad_mgmt_vendor_class_table *vendor;
1622 struct ib_mad_mgmt_vendor_class *vendor_class;
1627 * Was a MAD registration request supplied
1628 * with the original registration?
1630 if (!agent_priv->reg_req) {
1634 port_priv = agent_priv->qp_info->port_priv;
1635 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1636 class = port_priv->version[
1637 agent_priv->reg_req->mgmt_class_version].class;
1641 method = class->method_table[mgmt_class];
1643 /* Remove any methods for this mad agent */
1644 remove_methods_mad_agent(method, agent_priv);
1645 /* Now, check to see if there are any methods still in use */
1646 if (!check_method_table(method)) {
1647 /* If not, release management method table */
1649 class->method_table[mgmt_class] = NULL;
1650 /* Any management classes left ? */
1651 if (!check_class_table(class)) {
1652 /* If not, release management class table */
1655 agent_priv->reg_req->
1656 mgmt_class_version].class = NULL;
1662 if (!is_vendor_class(mgmt_class))
1665 /* normalize mgmt_class to vendor range 2 */
1666 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1667 vendor = port_priv->version[
1668 agent_priv->reg_req->mgmt_class_version].vendor;
1673 vendor_class = vendor->vendor_class[mgmt_class];
1675 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1678 method = vendor_class->method_table[index];
1680 /* Remove any methods for this mad agent */
1681 remove_methods_mad_agent(method, agent_priv);
1683 * Now, check to see if there are
1684 * any methods still in use
1686 if (!check_method_table(method)) {
1687 /* If not, release management method table */
1689 vendor_class->method_table[index] = NULL;
1690 memset(vendor_class->oui[index], 0, 3);
1691 /* Any OUIs left ? */
1692 if (!check_vendor_class(vendor_class)) {
1693 /* If not, release vendor class table */
1694 kfree(vendor_class);
1695 vendor->vendor_class[mgmt_class] = NULL;
1696 /* Any other vendor classes left ? */
1697 if (!check_vendor_table(vendor)) {
1700 agent_priv->reg_req->
1701 mgmt_class_version].
1713 static struct ib_mad_agent_private *
1714 find_mad_agent(struct ib_mad_port_private *port_priv,
1715 const struct ib_mad_hdr *mad_hdr)
1717 struct ib_mad_agent_private *mad_agent = NULL;
1718 unsigned long flags;
1720 spin_lock_irqsave(&port_priv->reg_lock, flags);
1721 if (ib_response_mad(mad_hdr)) {
1723 struct ib_mad_agent_private *entry;
1726 * Routing is based on high 32 bits of transaction ID
1729 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1730 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1731 if (entry->agent.hi_tid == hi_tid) {
1737 struct ib_mad_mgmt_class_table *class;
1738 struct ib_mad_mgmt_method_table *method;
1739 struct ib_mad_mgmt_vendor_class_table *vendor;
1740 struct ib_mad_mgmt_vendor_class *vendor_class;
1741 const struct ib_vendor_mad *vendor_mad;
1745 * Routing is based on version, class, and method
1746 * For "newer" vendor MADs, also based on OUI
1748 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1750 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1751 class = port_priv->version[
1752 mad_hdr->class_version].class;
1755 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1756 ARRAY_SIZE(class->method_table))
1758 method = class->method_table[convert_mgmt_class(
1759 mad_hdr->mgmt_class)];
1761 mad_agent = method->agent[mad_hdr->method &
1762 ~IB_MGMT_METHOD_RESP];
1764 vendor = port_priv->version[
1765 mad_hdr->class_version].vendor;
1768 vendor_class = vendor->vendor_class[vendor_class_index(
1769 mad_hdr->mgmt_class)];
1772 /* Find matching OUI */
1773 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1774 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1777 method = vendor_class->method_table[index];
1779 mad_agent = method->agent[mad_hdr->method &
1780 ~IB_MGMT_METHOD_RESP];
1786 if (mad_agent->agent.recv_handler)
1787 atomic_inc(&mad_agent->refcount);
1789 dev_notice(&port_priv->device->dev,
1790 "No receive handler for client %p on port %d\n",
1791 &mad_agent->agent, port_priv->port_num);
1796 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1801 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1802 const struct ib_mad_qp_info *qp_info,
1806 u32 qp_num = qp_info->qp->qp_num;
1808 /* Make sure MAD base version is understood */
1809 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1810 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1811 pr_err("MAD received with unsupported base version %d %s\n",
1812 mad_hdr->base_version, opa ? "(opa)" : "");
1816 /* Filter SMI packets sent to other than QP0 */
1817 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1818 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1822 /* CM attributes other than ClassPortInfo only use Send method */
1823 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1824 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1825 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1827 /* Filter GSI packets sent to QP0 */
1836 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1837 const struct ib_mad_hdr *mad_hdr)
1839 const struct ib_rmpp_mad *rmpp_mad;
1841 rmpp_mad = (const struct ib_rmpp_mad *)mad_hdr;
1842 return !mad_agent_priv->agent.rmpp_version ||
1843 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1844 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1845 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1846 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1849 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1850 const struct ib_mad_recv_wc *rwc)
1852 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1853 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1856 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1857 const struct ib_mad_send_wr_private *wr,
1858 const struct ib_mad_recv_wc *rwc )
1860 struct ib_ah_attr attr;
1861 u8 send_resp, rcv_resp;
1863 struct ib_device *device = mad_agent_priv->agent.device;
1864 u8 port_num = mad_agent_priv->agent.port_num;
1867 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1868 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1870 if (send_resp == rcv_resp)
1871 /* both requests, or both responses. GIDs different */
1874 if (ib_query_ah(wr->send_buf.ah, &attr))
1875 /* Assume not equal, to avoid false positives. */
1878 if (!!(attr.ah_flags & IB_AH_GRH) !=
1879 !!(rwc->wc->wc_flags & IB_WC_GRH))
1880 /* one has GID, other does not. Assume different */
1883 if (!send_resp && rcv_resp) {
1884 /* is request/response. */
1885 if (!(attr.ah_flags & IB_AH_GRH)) {
1886 if (ib_get_cached_lmc(device, port_num, &lmc))
1888 return (!lmc || !((attr.src_path_bits ^
1889 rwc->wc->dlid_path_bits) &
1892 if (ib_get_cached_gid(device, port_num,
1893 attr.grh.sgid_index, &sgid, NULL))
1895 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1900 if (!(attr.ah_flags & IB_AH_GRH))
1901 return attr.dlid == rwc->wc->slid;
1903 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1907 static inline int is_direct(u8 class)
1909 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1912 struct ib_mad_send_wr_private*
1913 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1914 const struct ib_mad_recv_wc *wc)
1916 struct ib_mad_send_wr_private *wr;
1917 const struct ib_mad_hdr *mad_hdr;
1919 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1921 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1922 if ((wr->tid == mad_hdr->tid) &&
1923 rcv_has_same_class(wr, wc) &&
1925 * Don't check GID for direct routed MADs.
1926 * These might have permissive LIDs.
1928 (is_direct(mad_hdr->mgmt_class) ||
1929 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1930 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1934 * It's possible to receive the response before we've
1935 * been notified that the send has completed
1937 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1938 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1939 wr->tid == mad_hdr->tid &&
1941 rcv_has_same_class(wr, wc) &&
1943 * Don't check GID for direct routed MADs.
1944 * These might have permissive LIDs.
1946 (is_direct(mad_hdr->mgmt_class) ||
1947 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1948 /* Verify request has not been canceled */
1949 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1954 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1956 mad_send_wr->timeout = 0;
1957 if (mad_send_wr->refcount == 1)
1958 list_move_tail(&mad_send_wr->agent_list,
1959 &mad_send_wr->mad_agent_priv->done_list);
1962 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1963 struct ib_mad_recv_wc *mad_recv_wc)
1965 struct ib_mad_send_wr_private *mad_send_wr;
1966 struct ib_mad_send_wc mad_send_wc;
1967 unsigned long flags;
1969 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1970 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1971 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1972 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1975 deref_mad_agent(mad_agent_priv);
1980 /* Complete corresponding request */
1981 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1982 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1983 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1985 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1986 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1987 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1988 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1989 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1990 /* user rmpp is in effect
1991 * and this is an active RMPP MAD
1993 mad_agent_priv->agent.recv_handler(
1994 &mad_agent_priv->agent, NULL,
1996 atomic_dec(&mad_agent_priv->refcount);
1998 /* not user rmpp, revert to normal behavior and
2000 ib_free_recv_mad(mad_recv_wc);
2001 deref_mad_agent(mad_agent_priv);
2005 ib_mark_mad_done(mad_send_wr);
2006 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2008 /* Defined behavior is to complete response before request */
2009 mad_agent_priv->agent.recv_handler(
2010 &mad_agent_priv->agent,
2011 &mad_send_wr->send_buf,
2013 atomic_dec(&mad_agent_priv->refcount);
2015 mad_send_wc.status = IB_WC_SUCCESS;
2016 mad_send_wc.vendor_err = 0;
2017 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2018 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2021 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2023 deref_mad_agent(mad_agent_priv);
2027 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2028 const struct ib_mad_qp_info *qp_info,
2029 const struct ib_wc *wc,
2031 struct ib_mad_private *recv,
2032 struct ib_mad_private *response)
2034 enum smi_forward_action retsmi;
2035 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2037 if (smi_handle_dr_smp_recv(smp,
2038 rdma_cap_ib_switch(port_priv->device),
2040 port_priv->device->phys_port_cnt) ==
2042 return IB_SMI_DISCARD;
2044 retsmi = smi_check_forward_dr_smp(smp);
2045 if (retsmi == IB_SMI_LOCAL)
2046 return IB_SMI_HANDLE;
2048 if (retsmi == IB_SMI_SEND) { /* don't forward */
2049 if (smi_handle_dr_smp_send(smp,
2050 rdma_cap_ib_switch(port_priv->device),
2051 port_num) == IB_SMI_DISCARD)
2052 return IB_SMI_DISCARD;
2054 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2055 return IB_SMI_DISCARD;
2056 } else if (rdma_cap_ib_switch(port_priv->device)) {
2057 /* forward case for switches */
2058 memcpy(response, recv, mad_priv_size(response));
2059 response->header.recv_wc.wc = &response->header.wc;
2060 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2061 response->header.recv_wc.recv_buf.grh = &response->grh;
2063 agent_send_response((const struct ib_mad_hdr *)response->mad,
2066 smi_get_fwd_port(smp),
2067 qp_info->qp->qp_num,
2071 return IB_SMI_DISCARD;
2073 return IB_SMI_HANDLE;
2076 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2077 struct ib_mad_private *response,
2078 size_t *resp_len, bool opa)
2080 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2081 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2083 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2084 recv_hdr->method == IB_MGMT_METHOD_SET) {
2085 memcpy(response, recv, mad_priv_size(response));
2086 response->header.recv_wc.wc = &response->header.wc;
2087 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2088 response->header.recv_wc.recv_buf.grh = &response->grh;
2089 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2090 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2091 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2092 resp_hdr->status |= IB_SMP_DIRECTION;
2094 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2095 if (recv_hdr->mgmt_class ==
2096 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2097 recv_hdr->mgmt_class ==
2098 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2099 *resp_len = opa_get_smp_header_size(
2100 (const struct opa_smp *)recv->mad);
2102 *resp_len = sizeof(struct ib_mad_hdr);
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

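/*
 * Completion handler for receive work requests posted on the MAD QPs:
 * unmaps the receive buffer, performs SMP and driver processing, and
 * dispatches the MAD to the matching agent (or generates an unmatched
 * response) before reposting a receive buffer.
 */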
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"%s: no memory for response buffer\n", __func__);
		goto out;
	}

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

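/*
 * Insert the send WR into the agent's wait list, sorted by absolute
 * timeout, and reschedule the timeout work if this entry now expires
 * first.
 */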
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	}
	else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

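/*
 * Completion handler for send work requests: unmaps the header and
 * payload buffers, completes the send WR and, if a send was waiting on
 * the overflow list, moves it to the send queue and posts it.
 */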
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
		struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

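/*
 * Flush all outstanding sends for an agent and report each of them to
 * the client with IB_WC_WR_FLUSH_ERR status.
 */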
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

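/*
 * Work handler that delivers locally processed MADs: when a receiving
 * agent exists, its receive handler runs first, followed by the send
 * completion for the originating agent.
 */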
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

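/*
 * Delayed work handler that expires entries on the wait list: requests
 * that can still be retried are resent; the rest are completed with
 * IB_WC_RESP_TIMEOUT_ERR (or their recorded error status).
 */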
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			kfree(mad_priv);
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

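/*
 * Create the special (SMI/GSI) QP used for MAD traffic on this port,
 * sized from the module's send/receive queue parameters.
 */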
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

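/*
 * Client "add" callback: open the MAD port and agent services on every
 * port of the device that supports IB MADs, unwinding already-opened
 * ports on failure.
 */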
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}