/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/compat.h>

#include "mlx4.h"
#include "fw.h"
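/*
 * Resource tracker for SR-IOV: the master (PF) accounts for every HCA
 * resource (QPs, CQs, SRQs, MPTs/MRs, MTTs, EQs, MACs, VLANs, counters,
 * XRC domains, flow steering rules) that slaves allocate through the
 * para-virtualized command channel.  Each live resource sits both in a
 * per-type red-black tree, keyed by resource id for fast lookup, and on
 * a per-slave list, so that everything a slave owns can be reclaimed
 * when it resets or dies.  A per-type quota/guarantee allocator decides
 * whether a slave may take one more instance of a given resource.
 */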
#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	u64			qpn;
};
static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
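/*
 * Example of the bookkeeping above (illustrative numbers only): with
 * quota[slave] = 64, guaranteed[slave] = 16, allocated = 12 and
 * count = 8, the first 4 instances come out of the slave's guaranteed
 * share, and from_free = 8 - (16 - 12) = 4 must come from the shared
 * free pool.  The request is granted only if taking those 4 still
 * leaves more than "reserved" (the sum of the other functions'
 * unclaimed guarantees) in the pool.
 */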
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM - 2 * dev->num_vfs;
					res_alloc->guaranteed[t] = res_alloc->quota[t];
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = 2;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < MLX4_MAX_PORTS; j++)
					res_alloc->res_port_rsvd[j] +=
						res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = 0x80 | slave;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
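/*
 * GID virtualization: a slave refers to GIDs by a per-function virtual
 * index.  On Ethernet ports update_gid() rebases that index by the
 * slave's base GID (mlx4_get_base_gid_ix()) so every function gets its
 * own slice of the physical GID table; on IB ports the slave id is
 * used directly.  Only the low 7 bits of mgid_index carry the index,
 * which is why the rebased value is masked with 0x7f.
 */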
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type)
			return -EINVAL;

		qpc->srqn |= cpu_to_be32(1 << 25); /*set cqe vlan mask */
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl = 1 << 6; /* set cv bit*/
		qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
			 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
			 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
			 (int)(qpc->pri_path.fl));
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= 1 << 5; /* set fsm bit */
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
		mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
			 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
			 vp_oper->mac_idx);
	}
	return 0;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
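/*
 * get_res()/put_res() implement a simple busy-bit protocol under the
 * tracker spinlock: get_res() saves the current state in from_state
 * and parks the entry in RES_ANY_BUSY so no other command can grab or
 * move it, and put_res() restores the saved state.  The
 * *_res_start_move_to()/res_end_move()/res_abort_move() helpers below
 * do the same dance for multi-step state transitions.
 */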
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* only entries 0..i-1 were inserted into the tree */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
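/*
 * QP tracker state machine: RES_QP_RESERVED (number allocated) <->
 * RES_QP_MAPPED (ICM for the context mapped) <-> RES_QP_HW (owned by
 * the device).  A move is only legal between neighboring states; the
 * "alloc" flag distinguishes a real RESERVED->MAPPED allocation from
 * the roll-back direction of a MAPPED->RESERVED free.
 */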
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
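/*
 * Slave resource allocation is driven by the ALLOC_RES command with a
 * two-phase protocol for QPs and MPTs: RES_OP_RESERVE hands out the
 * bare id (QP number / MPT index) and RES_OP_MAP_ICM later backs it
 * with ICM memory; CQs, SRQs and MTTs use the combined
 * RES_OP_RESERVE_AND_MAP.  Each path first asks the quota allocator
 * (mlx4_grant_resource), then the real allocator, then records the
 * range in the tracker, unwinding in reverse order on failure.
 */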
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 bf_qp;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		bf_qp = get_param_l(&in_param) >> 31;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, bf_qp);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mr_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int port)
{
	int err;
	u16 vlan;
	int vlan_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
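/*
 * Example (illustrative numbers): log_sq_size = 6 and log_sq_stride = 2
 * give a send queue of 64 entries of 64 bytes each, so
 * sq_size = 1 << (6 + 2 + 4) = 4096; for an SRQ/RSS/XRC QP the receive
 * queue contributes nothing.  With page_shift = 12 and page_offset = 0
 * that is total_pages = roundup_pow_of_two(4096 >> 12) = 1 MTT entry.
 */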
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
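/*
 * While a QP is in RES_QP_HW it pins the MTT, the receive/send CQs and
 * (optionally) the SRQ it was created with: the atomic ref_counts taken
 * above prevent those resources from being destroyed or moved out from
 * under the hardware until the QP itself leaves HW ownership.
 */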
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx  = inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
2968 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2969 struct mlx4_vhcr *vhcr,
2970 struct mlx4_cmd_mailbox *inbox,
2971 struct mlx4_cmd_mailbox *outbox,
2972 struct mlx4_cmd_info *cmd)
2975 int cqn = vhcr->in_modifier;
2978 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2981 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2984 atomic_dec(&cq->mtt->ref_count);
2985 res_end_move(dev, slave, RES_CQ, cqn);
2989 res_abort_move(dev, slave, RES_CQ, cqn);
2993 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2994 struct mlx4_vhcr *vhcr,
2995 struct mlx4_cmd_mailbox *inbox,
2996 struct mlx4_cmd_mailbox *outbox,
2997 struct mlx4_cmd_info *cmd)
2999 int cqn = vhcr->in_modifier;
3003 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3007 if (cq->com.from_state != RES_CQ_HW)
3010 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3012 put_res(dev, slave, cqn, RES_CQ);
3017 static int handle_resize(struct mlx4_dev *dev, int slave,
3018 struct mlx4_vhcr *vhcr,
3019 struct mlx4_cmd_mailbox *inbox,
3020 struct mlx4_cmd_mailbox *outbox,
3021 struct mlx4_cmd_info *cmd,
3025 struct res_mtt *orig_mtt;
3026 struct res_mtt *mtt;
3027 struct mlx4_cq_context *cqc = inbox->buf;
3028 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3030 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3034 if (orig_mtt != cq->mtt) {
3039 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3043 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3046 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3049 atomic_dec(&orig_mtt->ref_count);
3050 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3051 atomic_inc(&mtt->ref_count);
3053 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3057 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3059 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3065 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3066 struct mlx4_vhcr *vhcr,
3067 struct mlx4_cmd_mailbox *inbox,
3068 struct mlx4_cmd_mailbox *outbox,
3069 struct mlx4_cmd_info *cmd)
3071 int cqn = vhcr->in_modifier;
3075 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3079 if (cq->com.from_state != RES_CQ_HW)
3082 if (vhcr->op_modifier == 0) {
3083 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3087 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3089 put_res(dev, slave, cqn, RES_CQ);
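/* Number of pages (MTT entries) backing the SRQ buffer: the SRQ holds
 * (1 << log_srq_size) WQEs of (1 << (log_rq_stride + 4)) bytes each,
 * divided by the page size. E.g. log_srq_size = 10, log_rq_stride = 2
 * and page_shift = 12 give 1 << (10 + 2 + 4 - 12) = 16 pages.
 */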
3094 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3096 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3097 int log_rq_stride = srqc->logstride & 7;
3098 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3100 if (log_srq_size + log_rq_stride + 4 < page_shift)
3103 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3106 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3107 struct mlx4_vhcr *vhcr,
3108 struct mlx4_cmd_mailbox *inbox,
3109 struct mlx4_cmd_mailbox *outbox,
3110 struct mlx4_cmd_info *cmd)
3113 int srqn = vhcr->in_modifier;
3114 struct res_mtt *mtt;
3115 struct res_srq *srq;
3116 struct mlx4_srq_context *srqc = inbox->buf;
3117 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3119 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3122 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3125 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3128 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3133 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3137 atomic_inc(&mtt->ref_count);
3139 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3140 res_end_move(dev, slave, RES_SRQ, srqn);
3144 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3146 res_abort_move(dev, slave, RES_SRQ, srqn);
3151 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3152 struct mlx4_vhcr *vhcr,
3153 struct mlx4_cmd_mailbox *inbox,
3154 struct mlx4_cmd_mailbox *outbox,
3155 struct mlx4_cmd_info *cmd)
3158 int srqn = vhcr->in_modifier;
3159 struct res_srq *srq;
3161 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3164 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3167 atomic_dec(&srq->mtt->ref_count);
3169 atomic_dec(&srq->cq->ref_count);
3170 res_end_move(dev, slave, RES_SRQ, srqn);
3175 res_abort_move(dev, slave, RES_SRQ, srqn);
3180 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3181 struct mlx4_vhcr *vhcr,
3182 struct mlx4_cmd_mailbox *inbox,
3183 struct mlx4_cmd_mailbox *outbox,
3184 struct mlx4_cmd_info *cmd)
3187 int srqn = vhcr->in_modifier;
3188 struct res_srq *srq;
3190 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3193 if (srq->com.from_state != RES_SRQ_HW) {
3197 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3199 put_res(dev, slave, srqn, RES_SRQ);
3203 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3204 struct mlx4_vhcr *vhcr,
3205 struct mlx4_cmd_mailbox *inbox,
3206 struct mlx4_cmd_mailbox *outbox,
3207 struct mlx4_cmd_info *cmd)
3210 int srqn = vhcr->in_modifier;
3211 struct res_srq *srq;
3213 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3217 if (srq->com.from_state != RES_SRQ_HW) {
3222 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3224 put_res(dev, slave, srqn, RES_SRQ);
3228 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3229 struct mlx4_vhcr *vhcr,
3230 struct mlx4_cmd_mailbox *inbox,
3231 struct mlx4_cmd_mailbox *outbox,
3232 struct mlx4_cmd_info *cmd)
3235 int qpn = vhcr->in_modifier & 0x7fffff;
3238 err = get_res(dev, slave, qpn, RES_QP, &qp);
3241 if (qp->com.from_state != RES_QP_HW) {
3246 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3248 put_res(dev, slave, qpn, RES_QP);
3252 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3253 struct mlx4_vhcr *vhcr,
3254 struct mlx4_cmd_mailbox *inbox,
3255 struct mlx4_cmd_mailbox *outbox,
3256 struct mlx4_cmd_info *cmd)
3258 struct mlx4_qp_context *context = inbox->buf + 8;
3259 adjust_proxy_tun_qkey(dev, vhcr, context);
3260 update_pkey_index(dev, slave, inbox);
3261 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3264 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3265 struct mlx4_qp_context *qpc,
3266 struct mlx4_cmd_mailbox *inbox)
3270 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3271 u8 sched = *(u8 *)(inbox->buf + 64);
3274 port = ((sched >> 6) & 1) + 1;
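/* Byte 64 of the inbox (presumably the sched_queue field of the QP
 * context's primary path) carries the physical port in bit 6, hence
 * the "+ 1" for a 1-based port number.
 */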
3275 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3276 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3277 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3283 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3284 struct mlx4_vhcr *vhcr,
3285 struct mlx4_cmd_mailbox *inbox,
3286 struct mlx4_cmd_mailbox *outbox,
3287 struct mlx4_cmd_info *cmd)
3290 struct mlx4_qp_context *qpc = inbox->buf + 8;
3292 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3296 if (roce_verify_mac(dev, slave, qpc, inbox))
3299 update_pkey_index(dev, slave, inbox);
3300 update_gid(dev, inbox, (u8)slave);
3301 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3302 err = update_vport_qp_param(dev, inbox, slave);
3306 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3309 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3310 struct mlx4_vhcr *vhcr,
3311 struct mlx4_cmd_mailbox *inbox,
3312 struct mlx4_cmd_mailbox *outbox,
3313 struct mlx4_cmd_info *cmd)
3316 struct mlx4_qp_context *context = inbox->buf + 8;
3318 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3322 update_pkey_index(dev, slave, inbox);
3323 update_gid(dev, inbox, (u8)slave);
3324 adjust_proxy_tun_qkey(dev, vhcr, context);
3325 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3328 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3329 struct mlx4_vhcr *vhcr,
3330 struct mlx4_cmd_mailbox *inbox,
3331 struct mlx4_cmd_mailbox *outbox,
3332 struct mlx4_cmd_info *cmd)
3335 struct mlx4_qp_context *context = inbox->buf + 8;
3337 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3341 update_pkey_index(dev, slave, inbox);
3342 update_gid(dev, inbox, (u8)slave);
3343 adjust_proxy_tun_qkey(dev, vhcr, context);
3344 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3348 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3349 struct mlx4_vhcr *vhcr,
3350 struct mlx4_cmd_mailbox *inbox,
3351 struct mlx4_cmd_mailbox *outbox,
3352 struct mlx4_cmd_info *cmd)
3354 struct mlx4_qp_context *context = inbox->buf + 8;
3355 adjust_proxy_tun_qkey(dev, vhcr, context);
3356 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3359 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3360 struct mlx4_vhcr *vhcr,
3361 struct mlx4_cmd_mailbox *inbox,
3362 struct mlx4_cmd_mailbox *outbox,
3363 struct mlx4_cmd_info *cmd)
3366 struct mlx4_qp_context *context = inbox->buf + 8;
3368 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3372 adjust_proxy_tun_qkey(dev, vhcr, context);
3373 update_gid(dev, inbox, (u8)slave);
3374 update_pkey_index(dev, slave, inbox);
3375 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3378 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3379 struct mlx4_vhcr *vhcr,
3380 struct mlx4_cmd_mailbox *inbox,
3381 struct mlx4_cmd_mailbox *outbox,
3382 struct mlx4_cmd_info *cmd)
3385 struct mlx4_qp_context *context = inbox->buf + 8;
3387 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3391 adjust_proxy_tun_qkey(dev, vhcr, context);
3392 update_gid(dev, inbox, (u8)slave);
3393 update_pkey_index(dev, slave, inbox);
3394 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3397 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3398 struct mlx4_vhcr *vhcr,
3399 struct mlx4_cmd_mailbox *inbox,
3400 struct mlx4_cmd_mailbox *outbox,
3401 struct mlx4_cmd_info *cmd)
3404 int qpn = vhcr->in_modifier & 0x7fffff;
3407 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3410 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3414 atomic_dec(&qp->mtt->ref_count);
3415 atomic_dec(&qp->rcq->ref_count);
3416 atomic_dec(&qp->scq->ref_count);
3418 atomic_dec(&qp->srq->ref_count);
3419 res_end_move(dev, slave, RES_QP, qpn);
3423 res_abort_move(dev, slave, RES_QP, qpn);
3428 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3429 struct res_qp *rqp, u8 *gid)
3431 struct res_gid *res;
3433 list_for_each_entry(res, &rqp->mcg_list, list) {
3434 if (!memcmp(res->gid, gid, 16))
3440 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3441 u8 *gid, enum mlx4_protocol prot,
3442 enum mlx4_steer_type steer)
3444 struct res_gid *res;
3447 res = kzalloc(sizeof *res, GFP_KERNEL);
3451 spin_lock_irq(&rqp->mcg_spl);
3452 if (find_gid(dev, slave, rqp, gid)) {
3456 memcpy(res->gid, gid, 16);
3459 list_add_tail(&res->list, &rqp->mcg_list);
3462 spin_unlock_irq(&rqp->mcg_spl);
3467 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3468 u8 *gid, enum mlx4_protocol prot,
3469 enum mlx4_steer_type steer)
3471 struct res_gid *res;
3474 spin_lock_irq(&rqp->mcg_spl);
3475 res = find_gid(dev, slave, rqp, gid);
3476 if (!res || res->prot != prot || res->steer != steer)
3479 list_del(&res->list);
3483 spin_unlock_irq(&rqp->mcg_spl);
3488 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3489 struct mlx4_vhcr *vhcr,
3490 struct mlx4_cmd_mailbox *inbox,
3491 struct mlx4_cmd_mailbox *outbox,
3492 struct mlx4_cmd_info *cmd)
3494 struct mlx4_qp qp; /* dummy for calling attach/detach */
3495 u8 *gid = inbox->buf;
3496 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3500 int attach = vhcr->op_modifier;
3501 int block_loopback = vhcr->in_modifier >> 31;
3502 u8 steer_type_mask = 2;
3503 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
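/* Bit 1 of gid[7] appears to encode the steering type (unicast vs.
 * multicast entries); prot and type are passed through unchanged to
 * the common attach/detach helpers below.
 */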
3505 qpn = vhcr->in_modifier & 0xffffff;
3506 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3512 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
3516 err = mlx4_qp_attach_common(dev, &qp, gid,
3517 block_loopback, prot, type);
3521 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
3524 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
3527 put_res(dev, slave, qpn, RES_QP);
3531 /* ignore error return below, already in error */
3532 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
3534 put_res(dev, slave, qpn, RES_QP);
3540 * MAC validation for Flow Steering rules.
3541 * A VF can attach rules only with a MAC address that is assigned to it.
3544 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3545 struct list_head *rlist)
3547 struct mac_res *res, *tmp;
3550 /* make sure it isn't a multicast or broadcast MAC */
3551 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3552 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
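/* res->mac holds the 48-bit MAC in the low bits of a u64; shifting it
 * left by 16 and converting to big endian lines it up with the 6-byte
 * dst_mac field for the memcmp below.
 */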
3553 list_for_each_entry_safe(res, tmp, rlist, list) {
3554 be_mac = cpu_to_be64(res->mac << 16);
3555 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3558 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3559 eth_header->eth.dst_mac, slave);
3566 * In case the eth header is missing, insert an eth header carrying a
3567 * MAC address assigned to the VF.
3569 static int add_eth_header(struct mlx4_dev *dev, int slave,
3570 struct mlx4_cmd_mailbox *inbox,
3571 struct list_head *rlist, int header_id)
3573 struct mac_res *res, *tmp;
3575 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3576 struct mlx4_net_trans_rule_hw_eth *eth_header;
3577 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3578 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3580 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3582 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3584 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3586 /* Make room in the inbox for the eth header */
3587 switch (header_id) {
3588 case MLX4_NET_TRANS_RULE_ID_IPV4:
3590 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3591 memmove(ip_header, eth_header,
3592 sizeof(*ip_header) + sizeof(*l4_header));
3594 case MLX4_NET_TRANS_RULE_ID_TCP:
3595 case MLX4_NET_TRANS_RULE_ID_UDP:
3596 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3598 memmove(l4_header, eth_header, sizeof(*l4_header));
3603 list_for_each_entry_safe(res, tmp, rlist, list) {
3604 if (port == res->port) {
3605 be_mac = cpu_to_be64(res->mac << 16);
3610 pr_err("Failed adding eth header to FS rule; can't find a matching MAC for port %d\n",
3615 memset(eth_header, 0, sizeof(*eth_header));
3616 eth_header->size = sizeof(*eth_header) >> 2;
3617 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3618 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3619 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3625 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3626 struct mlx4_vhcr *vhcr,
3627 struct mlx4_cmd_mailbox *inbox,
3628 struct mlx4_cmd_mailbox *outbox,
3629 struct mlx4_cmd_info *cmd)
3632 struct mlx4_priv *priv = mlx4_priv(dev);
3633 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3634 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3636 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3637 struct _rule_hw *rule_header;
3640 if (dev->caps.steering_mode !=
3641 MLX4_STEERING_MODE_DEVICE_MANAGED)
3644 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3645 rule_header = (struct _rule_hw *)(ctrl + 1);
3646 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3648 switch (header_id) {
3649 case MLX4_NET_TRANS_RULE_ID_ETH:
3650 if (validate_eth_header_mac(slave, rule_header, rlist))
3653 case MLX4_NET_TRANS_RULE_ID_IB:
3655 case MLX4_NET_TRANS_RULE_ID_IPV4:
3656 case MLX4_NET_TRANS_RULE_ID_TCP:
3657 case MLX4_NET_TRANS_RULE_ID_UDP:
3658 pr_warn("Can't attach FS rule without L2 header; adding one\n");
3659 if (add_eth_header(dev, slave, inbox, rlist, header_id))
3661 vhcr->in_modifier +=
3662 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
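/* vhcr->in_modifier counts the rule size in dwords, so the added eth
 * header is accounted in 4-byte units (hence the >> 2).
 */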
3665 pr_err("Corrupted mailbox.\n");
3669 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3670 vhcr->in_modifier, 0,
3671 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3676 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3678 mlx4_err(dev, "Failed to add flow steering resources\n");
3680 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3681 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3687 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3688 struct mlx4_vhcr *vhcr,
3689 struct mlx4_cmd_mailbox *inbox,
3690 struct mlx4_cmd_mailbox *outbox,
3691 struct mlx4_cmd_info *cmd)
3695 if (dev->caps.steering_mode !=
3696 MLX4_STEERING_MODE_DEVICE_MANAGED)
3699 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3701 mlx4_err(dev, "Failed to remove flow steering resources\n");
3705 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3706 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3712 BUSY_MAX_RETRIES = 10
3715 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3716 struct mlx4_vhcr *vhcr,
3717 struct mlx4_cmd_mailbox *inbox,
3718 struct mlx4_cmd_mailbox *outbox,
3719 struct mlx4_cmd_info *cmd)
3722 int index = vhcr->in_modifier & 0xffff;
3724 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3728 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3729 put_res(dev, slave, index, RES_COUNTER);
3733 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3735 struct res_gid *rgid;
3736 struct res_gid *tmp;
3737 struct mlx4_qp qp; /* dummy for calling attach/detach */
3739 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3740 qp.qpn = rqp->local_qpn;
3741 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3743 list_del(&rgid->list);
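/* _move_all_busy - mark every resource of the given type owned by the
 * slave as busy, so that teardown cannot race with commands still in
 * flight; resources that are already busy are counted (and optionally
 * logged) and reported back to the caller.
 */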
3748 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3749 enum mlx4_resource type, int print)
3751 struct mlx4_priv *priv = mlx4_priv(dev);
3752 struct mlx4_resource_tracker *tracker =
3753 &priv->mfunc.master.res_tracker;
3754 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3755 struct res_common *r;
3756 struct res_common *tmp;
3760 spin_lock_irq(mlx4_tlock(dev));
3761 list_for_each_entry_safe(r, tmp, rlist, list) {
3762 if (r->owner == slave) {
3764 if (r->state == RES_ANY_BUSY) {
3767 "%s id 0x%llx is busy\n",
3772 r->from_state = r->state;
3773 r->state = RES_ANY_BUSY;
3779 spin_unlock_irq(mlx4_tlock(dev));
3784 static int move_all_busy(struct mlx4_dev *dev, int slave,
3785 enum mlx4_resource type)
3787 unsigned long begin;
3792 busy = _move_all_busy(dev, slave, type, 0);
3793 if (time_after(jiffies, begin + 5 * HZ))
3800 busy = _move_all_busy(dev, slave, type, 1);
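/* Each rem_slave_* helper below walks the slave's list for one
 * resource type and drives every object backwards through its state
 * machine (e.g. RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED),
 * issuing the matching firmware command and freeing ICM at each step
 * until the tracker entry can be erased.
 */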
3804 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3806 struct mlx4_priv *priv = mlx4_priv(dev);
3807 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3808 struct list_head *qp_list =
3809 &tracker->slave_list[slave].res_list[RES_QP];
3817 err = move_all_busy(dev, slave, RES_QP);
3819 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3820 "for slave %d\n", slave);
3822 spin_lock_irq(mlx4_tlock(dev));
3823 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3824 spin_unlock_irq(mlx4_tlock(dev));
3825 if (qp->com.owner == slave) {
3826 qpn = qp->com.res_id;
3827 detach_qp(dev, slave, qp);
3828 state = qp->com.from_state;
3829 while (state != 0) {
3831 case RES_QP_RESERVED:
3832 spin_lock_irq(mlx4_tlock(dev));
3833 rb_erase(&qp->com.node,
3834 &tracker->res_tree[RES_QP]);
3835 list_del(&qp->com.list);
3836 spin_unlock_irq(mlx4_tlock(dev));
3841 if (!valid_reserved(dev, slave, qpn))
3842 __mlx4_qp_free_icm(dev, qpn);
3843 state = RES_QP_RESERVED;
3847 err = mlx4_cmd(dev, in_param,
3850 MLX4_CMD_TIME_CLASS_A,
3853 mlx4_dbg(dev, "rem_slave_qps: failed"
3854 " to move slave %d qpn %d to"
3857 atomic_dec(&qp->rcq->ref_count);
3858 atomic_dec(&qp->scq->ref_count);
3859 atomic_dec(&qp->mtt->ref_count);
3861 atomic_dec(&qp->srq->ref_count);
3862 state = RES_QP_MAPPED;
3869 spin_lock_irq(mlx4_tlock(dev));
3871 spin_unlock_irq(mlx4_tlock(dev));
3874 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3876 struct mlx4_priv *priv = mlx4_priv(dev);
3877 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3878 struct list_head *srq_list =
3879 &tracker->slave_list[slave].res_list[RES_SRQ];
3880 struct res_srq *srq;
3881 struct res_srq *tmp;
3888 err = move_all_busy(dev, slave, RES_SRQ);
3890 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3891 "busy for slave %d\n", slave);
3893 spin_lock_irq(mlx4_tlock(dev));
3894 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3895 spin_unlock_irq(mlx4_tlock(dev));
3896 if (srq->com.owner == slave) {
3897 srqn = srq->com.res_id;
3898 state = srq->com.from_state;
3899 while (state != 0) {
3901 case RES_SRQ_ALLOCATED:
3902 __mlx4_srq_free_icm(dev, srqn);
3903 spin_lock_irq(mlx4_tlock(dev));
3904 rb_erase(&srq->com.node,
3905 &tracker->res_tree[RES_SRQ]);
3906 list_del(&srq->com.list);
3907 spin_unlock_irq(mlx4_tlock(dev));
3914 err = mlx4_cmd(dev, in_param, srqn, 1,
3916 MLX4_CMD_TIME_CLASS_A,
3919 mlx4_dbg(dev, "rem_slave_srqs: failed"
3920 " to move slave %d srq %d to"
3924 atomic_dec(&srq->mtt->ref_count);
3926 atomic_dec(&srq->cq->ref_count);
3927 state = RES_SRQ_ALLOCATED;
3935 spin_lock_irq(mlx4_tlock(dev));
3937 spin_unlock_irq(mlx4_tlock(dev));
3940 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3942 struct mlx4_priv *priv = mlx4_priv(dev);
3943 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3944 struct list_head *cq_list =
3945 &tracker->slave_list[slave].res_list[RES_CQ];
3954 err = move_all_busy(dev, slave, RES_CQ);
3956 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3957 "busy for slave %d\n", slave);
3959 spin_lock_irq(mlx4_tlock(dev));
3960 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3961 spin_unlock_irq(mlx4_tlock(dev));
3962 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3963 cqn = cq->com.res_id;
3964 state = cq->com.from_state;
3965 while (state != 0) {
3967 case RES_CQ_ALLOCATED:
3968 __mlx4_cq_free_icm(dev, cqn);
3969 spin_lock_irq(mlx4_tlock(dev));
3970 rb_erase(&cq->com.node,
3971 &tracker->res_tree[RES_CQ]);
3972 list_del(&cq->com.list);
3973 spin_unlock_irq(mlx4_tlock(dev));
3980 err = mlx4_cmd(dev, in_param, cqn, 1,
3982 MLX4_CMD_TIME_CLASS_A,
3985 mlx4_dbg(dev, "rem_slave_cqs: failed"
3986 " to move slave %d cq %d to"
3989 atomic_dec(&cq->mtt->ref_count);
3990 state = RES_CQ_ALLOCATED;
3998 spin_lock_irq(mlx4_tlock(dev));
4000 spin_unlock_irq(mlx4_tlock(dev));
4003 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4005 struct mlx4_priv *priv = mlx4_priv(dev);
4006 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4007 struct list_head *mpt_list =
4008 &tracker->slave_list[slave].res_list[RES_MPT];
4009 struct res_mpt *mpt;
4010 struct res_mpt *tmp;
4017 err = move_all_busy(dev, slave, RES_MPT);
4019 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
4020 "busy for slave %d\n", slave);
4022 spin_lock_irq(mlx4_tlock(dev));
4023 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4024 spin_unlock_irq(mlx4_tlock(dev));
4025 if (mpt->com.owner == slave) {
4026 mptn = mpt->com.res_id;
4027 state = mpt->com.from_state;
4028 while (state != 0) {
4030 case RES_MPT_RESERVED:
4031 __mlx4_mr_release(dev, mpt->key);
4032 spin_lock_irq(mlx4_tlock(dev));
4033 rb_erase(&mpt->com.node,
4034 &tracker->res_tree[RES_MPT]);
4035 list_del(&mpt->com.list);
4036 spin_unlock_irq(mlx4_tlock(dev));
4041 case RES_MPT_MAPPED:
4042 __mlx4_mr_free_icm(dev, mpt->key);
4043 state = RES_MPT_RESERVED;
4048 err = mlx4_cmd(dev, in_param, mptn, 0,
4050 MLX4_CMD_TIME_CLASS_A,
4053 mlx4_dbg(dev, "rem_slave_mrs: failed"
4054 " to move slave %d mpt %d to"
4058 atomic_dec(&mpt->mtt->ref_count);
4059 state = RES_MPT_MAPPED;
4066 spin_lock_irq(mlx4_tlock(dev));
4068 spin_unlock_irq(mlx4_tlock(dev));
4071 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4073 struct mlx4_priv *priv = mlx4_priv(dev);
4074 struct mlx4_resource_tracker *tracker =
4075 &priv->mfunc.master.res_tracker;
4076 struct list_head *mtt_list =
4077 &tracker->slave_list[slave].res_list[RES_MTT];
4078 struct res_mtt *mtt;
4079 struct res_mtt *tmp;
4085 err = move_all_busy(dev, slave, RES_MTT);
4087 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
4088 "busy for slave %d\n", slave);
4090 spin_lock_irq(mlx4_tlock(dev));
4091 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4092 spin_unlock_irq(mlx4_tlock(dev));
4093 if (mtt->com.owner == slave) {
4094 base = mtt->com.res_id;
4095 state = mtt->com.from_state;
4096 while (state != 0) {
4098 case RES_MTT_ALLOCATED:
4099 __mlx4_free_mtt_range(dev, base,
4101 spin_lock_irq(mlx4_tlock(dev));
4102 rb_erase(&mtt->com.node,
4103 &tracker->res_tree[RES_MTT]);
4104 list_del(&mtt->com.list);
4105 spin_unlock_irq(mlx4_tlock(dev));
4115 spin_lock_irq(mlx4_tlock(dev));
4117 spin_unlock_irq(mlx4_tlock(dev));
4120 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4122 struct mlx4_priv *priv = mlx4_priv(dev);
4123 struct mlx4_resource_tracker *tracker =
4124 &priv->mfunc.master.res_tracker;
4125 struct list_head *fs_rule_list =
4126 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4127 struct res_fs_rule *fs_rule;
4128 struct res_fs_rule *tmp;
4133 err = move_all_busy(dev, slave, RES_FS_RULE);
4135 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4138 spin_lock_irq(mlx4_tlock(dev));
4139 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4140 spin_unlock_irq(mlx4_tlock(dev));
4141 if (fs_rule->com.owner == slave) {
4142 base = fs_rule->com.res_id;
4143 state = fs_rule->com.from_state;
4144 while (state != 0) {
4146 case RES_FS_RULE_ALLOCATED:
4148 err = mlx4_cmd(dev, base, 0, 0,
4149 MLX4_QP_FLOW_STEERING_DETACH,
4150 MLX4_CMD_TIME_CLASS_A,
4153 spin_lock_irq(mlx4_tlock(dev));
4154 rb_erase(&fs_rule->com.node,
4155 &tracker->res_tree[RES_FS_RULE]);
4156 list_del(&fs_rule->com.list);
4157 spin_unlock_irq(mlx4_tlock(dev));
4167 spin_lock_irq(mlx4_tlock(dev));
4169 spin_unlock_irq(mlx4_tlock(dev));
4172 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4174 struct mlx4_priv *priv = mlx4_priv(dev);
4175 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4176 struct list_head *eq_list =
4177 &tracker->slave_list[slave].res_list[RES_EQ];
4184 struct mlx4_cmd_mailbox *mailbox;
4186 err = move_all_busy(dev, slave, RES_EQ);
4188 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
4189 "busy for slave %d\n", slave);
4191 spin_lock_irq(mlx4_tlock(dev));
4192 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4193 spin_unlock_irq(mlx4_tlock(dev));
4194 if (eq->com.owner == slave) {
4195 eqn = eq->com.res_id;
4196 state = eq->com.from_state;
4197 while (state != 0) {
4199 case RES_EQ_RESERVED:
4200 spin_lock_irq(mlx4_tlock(dev));
4201 rb_erase(&eq->com.node,
4202 &tracker->res_tree[RES_EQ]);
4203 list_del(&eq->com.list);
4204 spin_unlock_irq(mlx4_tlock(dev));
4210 mailbox = mlx4_alloc_cmd_mailbox(dev);
4211 if (IS_ERR(mailbox)) {
4215 err = mlx4_cmd_box(dev, slave, 0,
4218 MLX4_CMD_TIME_CLASS_A,
4221 mlx4_dbg(dev, "rem_slave_eqs: failed"
4222 " to move slave %d eqs %d to"
4223 " SW ownership\n", slave, eqn);
4224 mlx4_free_cmd_mailbox(dev, mailbox);
4225 atomic_dec(&eq->mtt->ref_count);
4226 state = RES_EQ_RESERVED;
4234 spin_lock_irq(mlx4_tlock(dev));
4236 spin_unlock_irq(mlx4_tlock(dev));
4239 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4241 struct mlx4_priv *priv = mlx4_priv(dev);
4242 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4243 struct list_head *counter_list =
4244 &tracker->slave_list[slave].res_list[RES_COUNTER];
4245 struct res_counter *counter;
4246 struct res_counter *tmp;
4250 err = move_all_busy(dev, slave, RES_COUNTER);
4252 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
4253 "busy for slave %d\n", slave);
4255 spin_lock_irq(mlx4_tlock(dev));
4256 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4257 if (counter->com.owner == slave) {
4258 index = counter->com.res_id;
4259 rb_erase(&counter->com.node,
4260 &tracker->res_tree[RES_COUNTER]);
4261 list_del(&counter->com.list);
4263 __mlx4_counter_free(dev, index);
4266 spin_unlock_irq(mlx4_tlock(dev));
4269 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4271 struct mlx4_priv *priv = mlx4_priv(dev);
4272 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4273 struct list_head *xrcdn_list =
4274 &tracker->slave_list[slave].res_list[RES_XRCD];
4275 struct res_xrcdn *xrcd;
4276 struct res_xrcdn *tmp;
4280 err = move_all_busy(dev, slave, RES_XRCD);
4282 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
4283 "busy for slave %d\n", slave);
4285 spin_lock_irq(mlx4_tlock(dev));
4286 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4287 if (xrcd->com.owner == slave) {
4288 xrcdn = xrcd->com.res_id;
4289 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4290 list_del(&xrcd->com.list);
4292 __mlx4_xrcd_free(dev, xrcdn);
4295 spin_unlock_irq(mlx4_tlock(dev));
4298 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4300 struct mlx4_priv *priv = mlx4_priv(dev);
4302 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4303 rem_slave_macs(dev, slave);
4304 rem_slave_vlans(dev, slave);
4305 rem_slave_qps(dev, slave);
4306 rem_slave_srqs(dev, slave);
4307 rem_slave_cqs(dev, slave);
4308 rem_slave_mrs(dev, slave);
4309 rem_slave_eqs(dev, slave);
4310 rem_slave_mtts(dev, slave);
4311 rem_slave_counters(dev, slave);
4312 rem_slave_xrcdns(dev, slave);
4313 rem_slave_fs_rule(dev, slave);
4314 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);