/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <dev/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"
enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
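
/*
 * Work-request IDs posted on the tunnel/proxy QPs pack three fields into
 * one u64: bits 0-31 carry the ring-buffer index, bits 32-33 carry the
 * proxy QP slot (0 = SMI, 1 = GSI), and bit 34 flags a receive WR.
 * For example, MLX4_TUN_SET_WRID_QPN(1) | 5 names send buffer 5 of the
 * GSI proxy QP, and MLX4_TUN_IS_RECV() on that value yields 0.
 */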
/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
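
/*
 * A GUID table block thus covers 8 consecutive GUID indexes and is
 * 64 bytes long, matching the data area of one SM GUIDInfo attribute.
 */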
struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | random());
}
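
/*
 * The generated node GUID carries the OpenIB OUI in its top 24 bits
 * (NODE_GUID_HI shifts the OUI up by 40) over random low-order bits,
 * so each paravirtualized slave gets a distinct, recognizable GUID.
 */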
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}
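
/*
 * Demux-generated transaction IDs are tagged with 0xff in the most
 * significant byte. Since a response TID whose MSB is 255 is treated
 * as belonging to dom0 in mlx4_ib_demux_mad(), these TIDs can never be
 * confused with slave-originated ones, whose MSB encodes the slave id.
 */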
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}
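
/*
 * Note on the mlx4_cmd_box() call above: bit 3 (0x8) of op_modifier
 * requests "network view" (MLX4_MAD_IFC_NET_VIEW). On the master the
 * bit is stripped and the command is issued MLX4_CMD_NATIVE; on other
 * functions it stays set and the command goes out MLX4_CMD_WRAPPED so
 * the PF can mediate it.
 */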
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}
/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
						(u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
						(u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					break;
				}
			}
		}
	}
}
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}
static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}
int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}
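
/*
 * Matching policy above: a slot whose cached pkey has the full-membership
 * bit (0x8000) set wins immediately; the first partial-membership match
 * is only remembered and used if no full member is found.
 */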
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret)
			return -EINVAL;

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret)
			return -EINVAL;
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
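
	/*
	 * Proxy QP numbering, as used for dqpn above: each slave owns a
	 * block of 8 proxy QPs starting at base_proxy_sqpn + 8 * slave;
	 * within the block, offset = port + 2 * qp_type - 1, i.e. the qp0
	 * proxies for ports 1/2 come first, then the qp1 proxies (the
	 * remaining offsets in the block are not addressed on this path).
	 */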
	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate tunnel tx buf after pass failure returns */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST: if the packet vlan is not the default vlan,
			 * drop the packet.
			 */
			if (vlan != wc->vlan_id)
				goto out;
			else
				/* VST: hide the vlan from the VF */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err, slave;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}
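
	/*
	 * The TID high byte is the demux cookie: mlx4_ib_multiplex_mad()
	 * stamps the sending slave's id there on the egress path, so a
	 * response TID identifies its owner. 255 never matches a real
	 * slave and therefore stays with dom0; for real slaves the byte
	 * is zeroed again so the slave sees the TID it generated.
	 */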
	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* make sure an unresolved slave id (255) did not fall through */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 (unsigned long long)be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 (unsigned long long)be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 (unsigned long long)be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 (unsigned long long)be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
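
/*
 * Return-code convention used above (from the ib_mad layer):
 * IB_MAD_RESULT_SUCCESS alone means "processed, no reply"; ORed with
 * IB_MAD_RESULT_REPLY it asks the core to send out_mad back; ORed with
 * IB_MAD_RESULT_CONSUMED it suppresses any reply, as for forwarded
 * traps and trap represses.
 */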
static void edit_counter_ext(struct mlx4_if_stat_extended *cnt, void *counters,
			     __be16 attr_id)
{
	switch (attr_id) {
	case IB_PMA_PORT_COUNTERS:
	{
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)counters;
		pma_cnt->port_xmit_data =
			cpu_to_be32((be64_to_cpu(cnt->counters[0].IfTxUnicastOctets) +
				     be64_to_cpu(cnt->counters[0].IfTxMulticastOctets) +
				     be64_to_cpu(cnt->counters[0].IfTxBroadcastOctets) +
				     be64_to_cpu(cnt->counters[0].IfTxDroppedOctets)) >> 2);
		pma_cnt->port_rcv_data =
			cpu_to_be32((be64_to_cpu(cnt->counters[0].IfRxUnicastOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxMulticastOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxBroadcastOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxNoBufferOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxErrorOctets)) >> 2);
		pma_cnt->port_xmit_packets =
			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfTxUnicastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxMulticastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxBroadcastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxDroppedFrames));
		pma_cnt->port_rcv_packets =
			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxUnicastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxMulticastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxBroadcastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxNoBufferFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxErrorFrames));
		pma_cnt->port_rcv_errors =
			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxErrorFrames));
		break;
	}

	case IB_PMA_PORT_COUNTERS_EXT:
	{
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)counters;

		pma_cnt_ext->port_xmit_data =
			cpu_to_be64((be64_to_cpu(cnt->counters[0].IfTxUnicastOctets) +
				     be64_to_cpu(cnt->counters[0].IfTxMulticastOctets) +
				     be64_to_cpu(cnt->counters[0].IfTxBroadcastOctets) +
				     be64_to_cpu(cnt->counters[0].IfTxDroppedOctets)) >> 2);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64((be64_to_cpu(cnt->counters[0].IfRxUnicastOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxMulticastOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxBroadcastOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxNoBufferOctets) +
				     be64_to_cpu(cnt->counters[0].IfRxErrorOctets)) >> 2);
		pma_cnt_ext->port_xmit_packets =
			cpu_to_be64(be64_to_cpu(cnt->counters[0].IfTxUnicastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxMulticastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxBroadcastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxDroppedFrames));
		pma_cnt_ext->port_rcv_packets =
			cpu_to_be64(be64_to_cpu(cnt->counters[0].IfRxUnicastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxMulticastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxBroadcastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxNoBufferFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxErrorFrames));
		pma_cnt_ext->port_unicast_xmit_packets =
			cnt->counters[0].IfTxUnicastFrames;
		pma_cnt_ext->port_unicast_rcv_packets =
			cnt->counters[0].IfRxUnicastFrames;
		pma_cnt_ext->port_multicast_xmit_packets =
			cpu_to_be64(be64_to_cpu(cnt->counters[0].IfTxMulticastFrames) +
				    be64_to_cpu(cnt->counters[0].IfTxBroadcastFrames));
		/* the rcv attribute is built from the rx counters (the
		 * original used the tx counters here, an apparent
		 * copy-paste slip) */
		pma_cnt_ext->port_multicast_rcv_packets =
			cpu_to_be64(be64_to_cpu(cnt->counters[0].IfRxMulticastFrames) +
				    be64_to_cpu(cnt->counters[0].IfRxBroadcastFrames));
		break;
	}

	default:
		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
		break;
	}
}
static void edit_counter(struct mlx4_if_stat_basic *cnt, void *counters,
			 __be16 attr_id)
{
	switch (attr_id) {
	case IB_PMA_PORT_COUNTERS:
	{
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *) counters;
		pma_cnt->port_xmit_data =
			cpu_to_be32(be64_to_cpu(
				    cnt->counters[0].IfTxOctets) >> 2);
		pma_cnt->port_rcv_data =
			cpu_to_be32(be64_to_cpu(
				    cnt->counters[0].IfRxOctets) >> 2);
		pma_cnt->port_xmit_packets =
			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfTxFrames));
		pma_cnt->port_rcv_packets =
			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxFrames));
		break;
	}
	case IB_PMA_PORT_COUNTERS_EXT:
	{
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *) counters;

		pma_cnt_ext->port_xmit_data =
			cpu_to_be64((be64_to_cpu(cnt->counters[0].IfTxOctets)) >> 2);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64((be64_to_cpu(cnt->counters[0].IfRxOctets)) >> 2);
		pma_cnt_ext->port_xmit_packets = cnt->counters[0].IfTxFrames;
		pma_cnt_ext->port_rcv_packets = cnt->counters[0].IfRxFrames;
		break;
	}
	default:
		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
		break;
	}
}
int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
			  union mlx4_counter *counter, u8 clear)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u32 inmod = counter_index | ((clear & 1) << 31);

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(counter, mailbox->buf, MLX4_IF_STAT_SZ(1));

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 counter_index = dev->counters[port_num - 1].counter_index & 0xffff;
	u8 mode;
	char counter_buf[MLX4_IF_STAT_SZ(1)];
	union mlx4_counter *counter = (union mlx4_counter *)
				      counter_buf;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	/* in case of a default counter, IB shares the counter with ETH */
	/* the status could be -EEXIST or -ENOSPC */
	if (dev->counters[port_num - 1].status) {
		memset(out_mad->data, 0, sizeof out_mad->data);
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	} else {
		if (mlx4_ib_query_if_stat(dev, counter_index, counter, 0))
			return IB_MAD_RESULT_FAILURE;

		memset(out_mad->data, 0, sizeof(out_mad->data));
		mode = counter->control.cnt_mode & 0xFF;
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		switch (mode & 0xf) {
		case 0:
			edit_counter((void *)counter,
				     (void *)(out_mad->data + 40),
				     in_mad->mad_hdr.attr_id);
			break;
		case 1:
			edit_counter_ext((void *)counter,
					 (void *)(out_mad->data + 40),
					 in_mad->mad_hdr.attr_id);
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	return err;
}
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
	ib_free_send_mad(mad_send_wc->send_buf);
}
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK, 0, 0);
}
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK, 0, 0);
		}
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}
static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}
static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
		goto out;
	}

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version  = 1;
		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method        = IB_MGMT_METHOD_GET;
		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}
void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		 * the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_SM_CHANGE);
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    changed_attr & MSTR_SM_CHANGE_MASK,
							    lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/* if master, notify all slaves */
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK, 0, 0);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/* if master, notify relevant slaves */
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}
void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}
static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}
static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}
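
/*
 * Offsets 0 and 1 in a slave's 8-QP proxy block are its qp0 proxies
 * (one per port), which is all this helper needs to check.
 */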
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
			 u8 *s_mac, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.pkey_index = wire_pkey_ix;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.next = NULL;
	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	if (s_mac)
		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);

	ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}
static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	int gids;
	int vfs;

	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;

	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = dev->dev->num_vfs;

	if (slave == 0)
		return 0;
	if (slave <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);

	return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1));
}
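
/*
 * RoCE GID layout assumed here: the PF keeps the first MLX4_ROCE_PF_GIDS
 * entries and the remaining entries are split as evenly as possible
 * among the VFs, with the first (gids % vfs) VFs getting one extra
 * entry each.
 */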
static int get_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
			       struct ib_ah_attr *ah_attr)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) {
		ah_attr->grh.sgid_index = slave;
		return 0;
	}
	ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
	return 0;
}
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}
	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "non-master trying to send QP0 packets\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
		break;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;
	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (ah_attr.ah_flags & IB_AH_GRH)
		if (get_real_sgid_index(dev, slave, ctx->port, &ah_attr))
			return;
	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
	ah_attr.vlan_id = tunnel->hdr.vlan;
	/* if the slave has a default vlan, use it */
	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
				    &ah_attr.vlan_id, &ah_attr.sl);

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, wc->smac, &tunnel->mad);
}
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(ctx->ib_dev,
						  tun_qp->ring[i].map))) {
			mlx4_ib_warn(ctx->ib_dev, "ib_dma_map_single failed\n");
			kfree(tun_qp->ring[i].addr);
			goto err;
		}
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ctx->ib_dev,
						  tun_qp->tx_ring[i].buf.map))) {
			mlx4_ib_warn(ctx->ib_dev, "ib_dma_map_single failed\n");
			kfree(tun_qp->tx_ring[i].buf.addr);
			goto tx_err;
		}
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}
static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;
	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", (unsigned long long)wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 (unsigned long long)wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, (unsigned long long)wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}
static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	ret = 0;
	if (create_tun)
		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
					      ctx->port, 0xFFFF, &attr.pkey_index);
	if (ret || !create_tun)
		attr.pkey_index =
			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err(" mlx4_ib_post_pv_buf error"
			       " (err = %d, i = %d)\n", ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}
/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_TUNNEL_BUFS - 1)))
					pr_err("Failed reposting SQP "
					       "buf:%lld\n", (unsigned long long)wc.wr_id);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, (unsigned long long)wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context "
		       "for port %d, slave %d\n", port, slave);
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;

	if (ctx->state != DEMUX_PV_STATE_DOWN)
		return -EEXIST;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only on port owner, and only if link layer is IB */
	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
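	/* One CQ serves both the receive and send rings of a proxy QP
	 * (hence 2 * MLX4_NUM_TUNNEL_BUFS), and is doubled again below
	 * when the context also carries a QP0. */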
	if (ctx->has_smi)
		cq_size *= 2;

	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
			       NULL, ctx, cq_size, 0);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(ctx->mr)) {
		ret = PTR_ERR(ctx->mr);
		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
		goto err_pd;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_mr;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_mr:
	ib_dereg_mr(ctx->mr);
	ctx->mr = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dereg_mr(ctx->mr);
		ctx->mr = NULL;
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}
void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
}
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
				   struct mlx4_ib_demux_ctx *ctx,
				   int port)
{
	char name[12];
	int ret = 0;
	int i;

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
	if (!ctx->tun)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->port = port;
	ctx->ib_dev = &dev->ib_dev;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
		if (ret) {
			ret = -ENOMEM;
			goto err_mcg;
		}
	}

	ret = mlx4_ib_mcg_port_init(ctx);
	if (ret) {
		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
		goto err_mcg;
	}

	snprintf(name, sizeof name, "mlx4_ibt%d", port);
	ctx->wq = create_singlethread_workqueue(name);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wq;
	}

	snprintf(name, sizeof name, "mlx4_ibud%d", port);
	ctx->ud_wq = create_singlethread_workqueue(name);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_udwq;
	}

	return 0;

err_udwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dereg_mr(sqp_ctx->mr);
		sqp_ctx->mr = NULL;
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;
	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wq);
	}
}
static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
	return;
}
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	int err = 0;
	union ib_gid gid;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (i == mlx4_master_func_num(dev->dev))
			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
		else
			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
	}

	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
		goto paravirt_err;
	}
	err = mlx4_ib_device_register_sysfs(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
		goto sysfs_err;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto demux_err;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

demux_err:
	while (i > 0) {
		--i;
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
	}
	mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
		mlx4_ib_device_unregister_sysfs(dev);
	}
}