/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				break;
			}
		}
	}
	return 0;
}
int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
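
/*
 * Note: the agent timeout timer fires in softirq context, so the
 * callback below only queues timeout_work; the actual scan of the wait
 * list (timeout_sends) then runs in process context on the port
 * workqueue.
 */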
static void timeout_callback(unsigned long data)
{
	struct ib_mad_agent_private *mad_agent_priv =
		(struct ib_mad_agent_private *) data;

	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->timeout_work);
}
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timeout_work, timeout_sends);
	setup_timer(&mad_agent_priv->timeout_timer, timeout_callback,
		    (unsigned long) mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
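
/*
 * Illustrative registration call (a sketch, not part of this file; the
 * handler names and context are hypothetical).  An agent wishing to
 * receive SA-class Get MADs on port 1 might do:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class		= IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version	= 2,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &req,
 *				      IB_MGMT_RMPP_VERSION, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */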
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}
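
/*
 * The deref_*() helpers above implement the teardown handshake used by
 * the unregister paths below: the final reference drop signals the
 * completion that unregister_mad_agent()/unregister_mad_snoop() sleep
 * on before freeing the structure.
 */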
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	del_timer_sync(&mad_agent_priv->timeout_timer);
	cancel_work_sync(&mad_agent_priv->timeout_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
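
/*
 * Both snoop loops above drop snoop_lock around the client callback;
 * the per-agent refcount taken under the lock keeps the table entry
 * alive while the handler runs, and the lock is retaken before the
 * scan continues.
 */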
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) !=
	     IB_LID_PERMISSIVE)
		goto out;
	if (smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	    IB_SMI_DISCARD) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return 0;
}
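
/*
 * Worked example: for an SA-class MAD, hdr_len is IB_MGMT_SA_HDR (56),
 * so each segment carries sizeof(struct ib_mad) - 56 = 200 data bytes.
 * A data_len of 300 fills one segment and half of the next, leaving
 * pad = 200 - (300 % 200) = 100 bytes of padding in the last segment.
 */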
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
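
/*
 * Illustrative call (a sketch, not part of this file): a single-MAD,
 * non-RMPP request using the baseline 24-byte header:
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	... fill in msg->mad and msg->ah, then ib_post_send_mad(msg, NULL) ...
 */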
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
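
/*
 * For reference, the header lengths from <rdma/ib_mad.h> are
 * IB_MGMT_SA_HDR = 56, IB_MGMT_DEVICE_HDR = 64, IB_MGMT_VENDOR_HDR = 40
 * and IB_MGMT_MAD_HDR = 24 bytes, so the management class determines
 * how much of the 256-byte MAD is header versus payload.
 */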
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
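
/*
 * Note for callers: on failure, *bad_send_buf (if supplied) identifies
 * the first buffer in the chain that was not posted.  Buffers ahead of
 * it were accepted and will complete through the send handler; the
 * failed buffer and those after it remain owned by the caller.
 */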
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	return mad_agent;
}
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(rwc->recv_buf.mad);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}
static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    sizeof(struct ib_mad_private) -
			    sizeof(struct ib_mad_private_header),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response) {
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");
		goto out;
	}

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		enum smi_forward_action retsmi;

		if (smi_handle_dr_smp_recv(&recv->mad.smp,
					   port_priv->device->node_type,
					   port_num,
					   port_priv->device->phys_port_cnt) ==
					   IB_SMI_DISCARD)
			goto out;

		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
		if (retsmi == IB_SMI_LOCAL)
			goto local;

		if (retsmi == IB_SMI_SEND) { /* don't forward */
			if (smi_handle_dr_smp_send(&recv->mad.smp,
						   port_priv->device->node_type,
						   port_num) == IB_SMI_DISCARD)
				goto out;

			if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
				goto out;
		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
			/* forward case for switches */
			memcpy(response, recv, sizeof(*response));
			response->header.recv_wc.wc = &response->header.wc;
			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
			response->header.recv_wc.recv_buf.grh = &response->grh;

			agent_send_response(&response->mad.mad,
					    &response->grh, wc,
					    port_priv->device,
					    smi_get_fwd_port(&recv->mad.smp),
					    qp_info->qp->qp_num);

			goto out;
		}
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	if (list_empty(&mad_agent_priv->wait_list)) {
		del_timer(&mad_agent_priv->timeout_timer);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			mod_timer(&mad_agent_priv->timeout_timer,
				  mad_send_wr->timeout);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_timer(&mad_agent_priv->timeout_timer,
			  mad_send_wr->timeout);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
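
/*
 * Both wait_for_response() and ib_reset_mad_timeout() manipulate the
 * agent's wait list and timer, and therefore rely on the caller holding
 * mad_agent_priv->lock.
 */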
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
2180 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2183 struct ib_mad_list_head *mad_list;
2184 struct ib_mad_qp_info *qp_info;
2185 struct ib_mad_send_wr_private *mad_send_wr;
2188 /* Determine if failure was a send or receive */
2189 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2190 qp_info = mad_list->mad_queue->qp_info;
2191 if (mad_list->mad_queue == &qp_info->recv_queue)
2193 * Receive errors indicate that the QP has entered the error
2194 * state - error handling/shutdown code will cleanup
2199 * Send errors will transition the QP to SQE - move
2200 * QP to RTS and repost flushed work requests
2202 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2204 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2205 if (mad_send_wr->retry) {
2207 struct ib_send_wr *bad_send_wr;
2209 mad_send_wr->retry = 0;
2210 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2213 ib_mad_send_done_handler(port_priv, wc);
2215 ib_mad_send_done_handler(port_priv, wc);
2217 struct ib_qp_attr *attr;
2219 /* Transition QP to RTS and fail offending send */
2220 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2222 attr->qp_state = IB_QPS_RTS;
2223 attr->cur_qp_state = IB_QPS_SQE;
2224 ret = ib_modify_qp(qp_info->qp, attr,
2225 IB_QP_STATE | IB_QP_CUR_STATE);
2228 printk(KERN_ERR PFX "mad_error_handler - "
2229 "ib_modify_qp to RTS : %d\n", ret);
2231 mark_sends_for_retry(qp_info);
2233 ib_mad_send_done_handler(port_priv, wc);
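
/*
 * Per the IB spec, a send error moves the QP to the SQE (send queue
 * error) state while the receive side keeps working; recovery is
 * therefore just SQE -> RTS plus reposting the flushed sends, with no
 * full QP reset.
 */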
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
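
/*
 * The CQ is rearmed before it is drained: a completion that arrives
 * between the final poll and the next interrupt still generates a fresh
 * event, so no completion is ever missed by this handler.
 */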
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
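
/*
 * cancel_mads() runs during agent unregistration: sends still on the
 * send list are only marked flushed (their completions surface through
 * the normal send completion path), while waiters are spliced off the
 * wait list and reported here directly.
 */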
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
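
/*
 * Illustrative usage (not from this file): a client that sent a request
 * with ib_post_send_mad() can abort it before the response arrives:
 *
 *	ib_cancel_mad(mad_agent, send_buf);
 *
 * The request is then reported to the client's send_handler with status
 * IB_WC_WR_FLUSH_ERR.
 */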
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
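
/*
 * local_completions handles MADs that never touch the wire: sends that
 * were resolved to the local port (e.g. SMPs directed at ourselves) are
 * handed straight to the receiving agent and then completed back to the
 * sender, mimicking a normal send/receive round trip.
 */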
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
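
/*
 * A successful retry re-arms the request: it gains a fresh send
 * reference and moves back to the send list, so the timeout machinery
 * treats it exactly like a brand-new transmission.
 */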
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	/* NB: the work member name below is an assumption; this variant
	 * pairs timeout handling with the timeout_timer armed below. */
	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timeout_work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			mod_timer(&mad_agent_priv->timeout_timer,
				  mad_send_wr->timeout);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
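
/*
 * The wait list is kept sorted by expiration time, so the scan stops at
 * the first entry that has not yet timed out and simply re-arms the
 * timer for that deadline.
 */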
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    sizeof *mad_priv -
					      sizeof mad_priv->header,
					    DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
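
/*
 * The do/while (post) loop keeps allocating and posting buffers until
 * the receive queue holds max_active entries, so a single call both
 * replenishes one consumed buffer and fills an initially empty queue.
 */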
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {
		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    sizeof(struct ib_mad_private) -
				    sizeof(struct ib_mad_private_header),
				    DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
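
/*
 * QP0/QP1 carry no connection, so bringing them up is just the
 * INIT -> RTR -> RTS sequence with the well-known QKEY (QP1 uses
 * IB_QP1_QKEY, QP0 uses 0); on link layers without QP0 (RoCE) the SMI
 * slot stays NULL and is skipped.
 */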
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
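
/*
 * The error labels unwind in strict reverse order of acquisition; the
 * receive queues are drained only after the QPs and CQ are gone, so no
 * in-flight receive completion can reference a freed buffer.
 */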
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}

static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
static int __init ib_mad_init_module(void)
{
	int ret;

	/* Clamp the module parameters to the supported range */
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	spin_lock_init(&ib_mad_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
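
/*
 * Illustrative load-time tuning (parameter names as declared above):
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 *
 * Out-of-range values are silently clamped to
 * [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] by the init routine.
 */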
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);