/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	u64			qpn;
};
static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
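
/*
 * Resource accounting: each resource type tracks, per function, a hard
 * quota and a guaranteed share, plus a shared free pool.  A request is
 * granted if it fits within the caller's guaranteed share, or if the
 * excess can be taken from the free pool without dipping into the
 * amount still reserved for other functions' guarantees.  E.g. with
 * guaranteed = 10 and allocated = 8, a request of count = 5 needs only
 * from_free = 3 out of the shared pool.
 */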
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
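
/*
 * Quota initialization: with N usable instances and num_vfs VFs, each
 * function (PF and VFs alike) gets guaranteed = N / (2 * (num_vfs + 1))
 * and quota = N / 2 + guaranteed.  For example, N = 1000 and num_vfs = 4
 * gives guaranteed = 100 and quota = 600: half of the pool is shared
 * first-come first-served, while every function keeps a fixed floor.
 */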
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM - 2 * dev->num_vfs;
					res_alloc->guaranteed[t] = res_alloc->quota[t];
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = 2;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < MLX4_MAX_PORTS; j++)
					res_alloc->res_port_rsvd[j] +=
						res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
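
/*
 * GID paravirtualization: a slave addresses GIDs with indices relative
 * to its own GID range.  Before the QP context reaches the firmware,
 * the index is rewritten - on Ethernet (RoCE) ports by adding the
 * slave's base GID index, on IB ports by substituting the slave id -
 * so a slave can never reference GID entries of other functions.
 */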
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = 0x80 | slave;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
static int check_counter_index_validity(struct mlx4_dev *dev, int slave, int port, int idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct counter_index *counter, *tmp_counter;

	if (slave == 0) {
		list_for_each_entry_safe(counter, tmp_counter,
					 &priv->counters_table.global_port_list[port - 1],
					 list) {
			if (counter->index == idx)
				return 0;
		}
	} else {
		list_for_each_entry_safe(counter, tmp_counter,
					 &priv->counters_table.vf_list[slave - 1][port - 1],
					 list) {
			if (counter->index == idx)
				return 0;
		}
	}
	return -EINVAL;
}
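
/*
 * VST enforcement: when the PF administrator pinned a VF to a vlan
 * (default_vlan != MLX4_VGT), the QP context written by the VF is
 * rewritten here - vlan stripping is forced, the admin vlan index and
 * QoS are inserted, and tagged traffic is blocked - so the VF cannot
 * escape its assigned vlan.  Reserved (special/proxy/tunnel) QPs are
 * left untouched.
 */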
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH &&
	    qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX) {
		if (check_counter_index_validity(dev, slave, port,
						 qpc->pri_path.counter_index))
			return -EINVAL;
	}

	mlx4_dbg(dev, "%s: QP counter_index %d for slave %d port %d\n",
		 __func__, qpc->pri_path.counter_index, slave, port);

	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
	    dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH &&
	    !mlx4_is_qp_reserved(dev, qpn) &&
	    qp_type == MLX4_QP_ST_MLX &&
	    qpc->pri_path.counter_index != 0xFF) {
		/* disable multicast loopback to qp with same counter */
		qpc->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
		qpc->pri_path.vlan_control |=
			MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
	}

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (MLX4_QP_ST_RC != qp_type) {
			if (0 != vp_oper->state.default_vlan) {
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* priority tagged */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
			}
		}
		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
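
/*
 * get_res()/put_res() implement a simple busy lease: get_res() records
 * the current state in from_state and parks the entry in RES_ANY_BUSY
 * so concurrent verbs fail with -EBUSY; put_res() restores the saved
 * state.  Ownership is checked so one slave cannot grab another's
 * resource.
 */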
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
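
/*
 * add_res_range() works in two phases: all tracker entries are
 * allocated outside the lock first, then inserted into the rb-tree and
 * the per-slave list under the lock, with full rollback (undo label)
 * if any id in the range already exists.
 */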
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	/* the EQ state values alias the MPT ones; use the EQ names here */
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
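
/*
 * State machine helpers: each *_res_start_move_to() validates the
 * requested transition (e.g. RES_QP_RESERVED <-> RES_QP_MAPPED ->
 * RES_QP_HW), saves from_state/to_state and parks the entry in the
 * BUSY state.  The caller then either calls res_end_move() to commit
 * the transition or res_abort_move() to roll back to from_state.
 */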
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, (unsigned long long)r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 (unsigned long long)r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
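
/*
 * For RES_OP_RESERVE the 64-bit in_param is packed as follows: the low
 * dword carries the QP count in bits 0-23 and allocation flags in bits
 * 24-31, and the high dword carries the requested alignment.  The base
 * QPN of the reserved range is returned through the low dword of
 * out_param.
 */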
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		flags = get_param_l(&in_param) >> 24;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}
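
/*
 * Per-slave MAC bookkeeping: each registered (mac, port) pair is kept
 * on the slave's RES_MAC list with a reference count, so re-registering
 * the same address only bumps the count and the quota is charged once
 * per distinct entry.
 */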
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port)
		return -EINVAL;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, slave, port, &index);
	if (!err)
		set_param_l(out_param, index);

	return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param,
					(vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param, int port)
{
	int index;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);

	__mlx4_counter_free(dev, slave, port, index);

	return 0;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return 0;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param,
				       (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
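
/*
 * qp_get_mtt_size() computes how many MTT pages back a QP's work
 * queues: sq_size = 1 << (log_sq_size + log_sq_stride + 4) bytes (the
 * stride is encoded as log2 of 16-byte units), and rq_size likewise
 * unless the QP uses an SRQ, RSS or XRC.  E.g. log_sq_size = 6 and
 * log_sq_stride = 2 give a 4KB SQ, i.e. a single page when
 * page_shift = 12 and page_offset = 0.
 */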
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Currently disable memory windows since this feature isn't tested yet
	 * under virtualization.
	 */
	if (!mr_is_region(inbox->buf)) {
		err = -ENOSYS;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}
static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
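
/*
 * EQ numbers are only unique per slave, unlike QPs/CQs/SRQs, so EQ
 * entries are keyed in the global tracker tree as
 * res_id = (slave << 8) | eqn, and SW2HW_EQ below both registers the
 * id and moves it to RES_EQ_HW in one flow.
 */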
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
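
/*
 * Companion check to update_gid(): since slave GID indices are
 * paravirtualized, the mgid_index a slave writes into a primary or
 * alternate path must stay below the number of GIDs assigned to that
 * slave; otherwise the transition is rejected.
 */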
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx	= inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}
2898 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2899 struct mlx4_vhcr *vhcr,
2900 struct mlx4_cmd_mailbox *inbox,
2901 struct mlx4_cmd_mailbox *outbox,
2902 struct mlx4_cmd_info *cmd)
2904 struct mlx4_mtt mtt;
2905 __be64 *page_list = inbox->buf;
2906 u64 *pg_list = (u64 *)page_list;
2908 struct res_mtt *rmtt = NULL;
2909 int start = be64_to_cpu(page_list[0]);
2910 int npages = vhcr->in_modifier;
2913 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2917 /* Call the SW implementation of write_mtt:
2918 * - Prepare a dummy mtt struct
2919 * - Translate inbox contents to simple addresses in host endianess */
2920 mtt.offset = 0; /* TBD: this is broken, but left unhandled since
2921 the offset is not actually used here */
2924 for (i = 0; i < npages; ++i)
2925 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2927 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2928 ((u64 *)page_list + 2));
2931 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2936 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2937 struct mlx4_vhcr *vhcr,
2938 struct mlx4_cmd_mailbox *inbox,
2939 struct mlx4_cmd_mailbox *outbox,
2940 struct mlx4_cmd_info *cmd)
2942 int eqn = vhcr->in_modifier;
2943 int res_id = eqn | (slave << 8);
2947 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2951 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2955 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2959 atomic_dec(&eq->mtt->ref_count);
2960 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2961 res_end_move(dev, slave, RES_EQ, res_id);
2962 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2967 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2969 res_abort_move(dev, slave, RES_EQ, res_id);
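/* mlx4_GEN_EQE() forwards an asynchronous event to a slave by
 * injecting an EQE into the event queue the slave registered for this
 * event type. The master builds the EQE in a command mailbox (28 of
 * the 32 EQE bytes are copied; the trailing bytes, which include the
 * ownership bit, are not taken from the source) and issues GEN_EQE
 * with the slave in the low byte of the input modifier and the target
 * EQN in bits 16-23.
 */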
2974 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2976 struct mlx4_priv *priv = mlx4_priv(dev);
2977 struct mlx4_slave_event_eq_info *event_eq;
2978 struct mlx4_cmd_mailbox *mailbox;
2979 u32 in_modifier = 0;
2984 if (!priv->mfunc.master.slave_state)
2987 /* check that the slave is valid, is not the PF, and is active */
2988 if (slave < 0 || slave >= dev->num_slaves ||
2989 slave == dev->caps.function ||
2990 !priv->mfunc.master.slave_state[slave].active)
2993 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2995 /* Create the event only if the slave is registered */
2996 if (event_eq->eqn < 0)
2999 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3000 res_id = (slave << 8) | event_eq->eqn;
3001 err = get_res(dev, slave, res_id, RES_EQ, &req);
3005 if (req->com.from_state != RES_EQ_HW) {
3010 mailbox = mlx4_alloc_cmd_mailbox(dev);
3011 if (IS_ERR(mailbox)) {
3012 err = PTR_ERR(mailbox);
3016 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3018 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3021 memcpy(mailbox->buf, (u8 *) eqe, 28);
3023 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3025 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3026 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3029 put_res(dev, slave, res_id, RES_EQ);
3030 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3031 mlx4_free_cmd_mailbox(dev, mailbox);
3035 put_res(dev, slave, res_id, RES_EQ);
3038 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3042 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3043 struct mlx4_vhcr *vhcr,
3044 struct mlx4_cmd_mailbox *inbox,
3045 struct mlx4_cmd_mailbox *outbox,
3046 struct mlx4_cmd_info *cmd)
3048 int eqn = vhcr->in_modifier;
3049 int res_id = eqn | (slave << 8);
3053 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3057 if (eq->com.from_state != RES_EQ_HW) {
3062 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3065 put_res(dev, slave, res_id, RES_EQ);
3069 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3070 struct mlx4_vhcr *vhcr,
3071 struct mlx4_cmd_mailbox *inbox,
3072 struct mlx4_cmd_mailbox *outbox,
3073 struct mlx4_cmd_info *cmd)
3076 int cqn = vhcr->in_modifier;
3077 struct mlx4_cq_context *cqc = inbox->buf;
3078 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3080 struct res_mtt *mtt;
3082 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3085 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3088 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3091 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3094 atomic_inc(&mtt->ref_count);
3096 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3097 res_end_move(dev, slave, RES_CQ, cqn);
3101 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3103 res_abort_move(dev, slave, RES_CQ, cqn);
3107 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3108 struct mlx4_vhcr *vhcr,
3109 struct mlx4_cmd_mailbox *inbox,
3110 struct mlx4_cmd_mailbox *outbox,
3111 struct mlx4_cmd_info *cmd)
3114 int cqn = vhcr->in_modifier;
3117 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3120 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3123 atomic_dec(&cq->mtt->ref_count);
3124 res_end_move(dev, slave, RES_CQ, cqn);
3128 res_abort_move(dev, slave, RES_CQ, cqn);
3132 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3133 struct mlx4_vhcr *vhcr,
3134 struct mlx4_cmd_mailbox *inbox,
3135 struct mlx4_cmd_mailbox *outbox,
3136 struct mlx4_cmd_info *cmd)
3138 int cqn = vhcr->in_modifier;
3142 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3146 if (cq->com.from_state != RES_CQ_HW)
3149 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3151 put_res(dev, slave, cqn, RES_CQ);
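/* handle_resize() services MODIFY_CQ with op_modifier 0 (CQ resize):
 * the new CQ context points at a new MTT range, so after checking that
 * the tracked MTT really is the one the CQ currently holds and
 * validating the new range, the reference count is moved from the old
 * MTT to the new one to keep the tracker's picture accurate.
 */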
3156 static int handle_resize(struct mlx4_dev *dev, int slave,
3157 struct mlx4_vhcr *vhcr,
3158 struct mlx4_cmd_mailbox *inbox,
3159 struct mlx4_cmd_mailbox *outbox,
3160 struct mlx4_cmd_info *cmd,
3164 struct res_mtt *orig_mtt;
3165 struct res_mtt *mtt;
3166 struct mlx4_cq_context *cqc = inbox->buf;
3167 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3169 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3173 if (orig_mtt != cq->mtt) {
3178 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3182 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3185 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3188 atomic_dec(&orig_mtt->ref_count);
3189 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3190 atomic_inc(&mtt->ref_count);
3192 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3196 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3198 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3204 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3205 struct mlx4_vhcr *vhcr,
3206 struct mlx4_cmd_mailbox *inbox,
3207 struct mlx4_cmd_mailbox *outbox,
3208 struct mlx4_cmd_info *cmd)
3210 int cqn = vhcr->in_modifier;
3214 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3218 if (cq->com.from_state != RES_CQ_HW)
3221 if (vhcr->op_modifier == 0) {
3222 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3226 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3228 put_res(dev, slave, cqn, RES_CQ);
3233 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3235 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3236 int log_rq_stride = srqc->logstride & 7;
3237 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3239 if (log_srq_size + log_rq_stride + 4 < page_shift)
3240 return 0;
3242 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
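/* Illustrative example for the computation above (not in the original
 * source): an SRQ with log_srq_size = 10 (1024 WQEs) and
 * log_rq_stride = 2 (WQEs of 1 << (2 + 4) = 64 bytes) on 4 KB pages
 * (page_shift = 12) occupies 1 << (10 + 2 + 4 - 12) = 16 pages,
 * i.e. 16 MTT entries.
 */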
3245 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3246 struct mlx4_vhcr *vhcr,
3247 struct mlx4_cmd_mailbox *inbox,
3248 struct mlx4_cmd_mailbox *outbox,
3249 struct mlx4_cmd_info *cmd)
3252 int srqn = vhcr->in_modifier;
3253 struct res_mtt *mtt;
3254 struct res_srq *srq;
3255 struct mlx4_srq_context *srqc = inbox->buf;
3256 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3258 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3261 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3264 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3267 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3272 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3276 atomic_inc(&mtt->ref_count);
3278 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3279 res_end_move(dev, slave, RES_SRQ, srqn);
3283 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3285 res_abort_move(dev, slave, RES_SRQ, srqn);
3290 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3291 struct mlx4_vhcr *vhcr,
3292 struct mlx4_cmd_mailbox *inbox,
3293 struct mlx4_cmd_mailbox *outbox,
3294 struct mlx4_cmd_info *cmd)
3297 int srqn = vhcr->in_modifier;
3298 struct res_srq *srq;
3300 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3303 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3306 atomic_dec(&srq->mtt->ref_count);
3308 atomic_dec(&srq->cq->ref_count);
3309 res_end_move(dev, slave, RES_SRQ, srqn);
3314 res_abort_move(dev, slave, RES_SRQ, srqn);
3319 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3320 struct mlx4_vhcr *vhcr,
3321 struct mlx4_cmd_mailbox *inbox,
3322 struct mlx4_cmd_mailbox *outbox,
3323 struct mlx4_cmd_info *cmd)
3326 int srqn = vhcr->in_modifier;
3327 struct res_srq *srq;
3329 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3332 if (srq->com.from_state != RES_SRQ_HW) {
3336 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3338 put_res(dev, slave, srqn, RES_SRQ);
3342 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3343 struct mlx4_vhcr *vhcr,
3344 struct mlx4_cmd_mailbox *inbox,
3345 struct mlx4_cmd_mailbox *outbox,
3346 struct mlx4_cmd_info *cmd)
3349 int srqn = vhcr->in_modifier;
3350 struct res_srq *srq;
3352 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3356 if (srq->com.from_state != RES_SRQ_HW) {
3361 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3363 put_res(dev, slave, srqn, RES_SRQ);
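/* mlx4_GEN_QP_wrapper() is the generic QP command path: the command is
 * forwarded to firmware only if the tracker shows the QP in hardware
 * ownership (RES_QP_HW) for this slave. Most of the QP state
 * transition wrappers below funnel through it after fixing up the
 * context (pkey index, GID, proxy/tunnel qkey).
 */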
3367 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3368 struct mlx4_vhcr *vhcr,
3369 struct mlx4_cmd_mailbox *inbox,
3370 struct mlx4_cmd_mailbox *outbox,
3371 struct mlx4_cmd_info *cmd)
3374 int qpn = vhcr->in_modifier & 0x7fffff;
3377 err = get_res(dev, slave, qpn, RES_QP, &qp);
3380 if (qp->com.from_state != RES_QP_HW) {
3385 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3387 put_res(dev, slave, qpn, RES_QP);
3391 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3392 struct mlx4_vhcr *vhcr,
3393 struct mlx4_cmd_mailbox *inbox,
3394 struct mlx4_cmd_mailbox *outbox,
3395 struct mlx4_cmd_info *cmd)
3397 struct mlx4_qp_context *context = inbox->buf + 8;
3398 adjust_proxy_tun_qkey(dev, vhcr, context);
3399 update_pkey_index(dev, slave, inbox);
3400 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3403 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3404 struct mlx4_qp_context *qpc,
3405 struct mlx4_cmd_mailbox *inbox)
3409 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3410 u8 sched = *(u8 *)(inbox->buf + 64);
3413 port = (sched >> 6 & 1) + 1;
3414 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3415 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3416 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3422 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3423 struct mlx4_vhcr *vhcr,
3424 struct mlx4_cmd_mailbox *inbox,
3425 struct mlx4_cmd_mailbox *outbox,
3426 struct mlx4_cmd_info *cmd)
3429 struct mlx4_qp_context *qpc = inbox->buf + 8;
3430 int qpn = vhcr->in_modifier & 0x7fffff;
3432 u8 orig_sched_queue;
3433 __be32 orig_param3 = qpc->param3;
3434 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3435 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3436 u8 orig_pri_path_fl = qpc->pri_path.fl;
3437 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3438 u8 orig_feup = qpc->pri_path.feup;
3440 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3444 if (roce_verify_mac(dev, slave, qpc, inbox))
3447 update_pkey_index(dev, slave, inbox);
3448 update_gid(dev, inbox, (u8)slave);
3449 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3450 orig_sched_queue = qpc->pri_path.sched_queue;
3452 err = get_res(dev, slave, qpn, RES_QP, &qp);
3455 if (qp->com.from_state != RES_QP_HW) {
3460 /* do not modify vport QP params for RSS QPs */
3461 if (!(qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET))) {
3462 err = update_vport_qp_param(dev, inbox, slave, qpn);
3467 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3469 /* if no error, save the sched queue value passed in by the VF. This is
3470 * essentially the QoS value provided by the VF, and will be useful
3471 * if we allow dynamic changes from VST back to VGT
3474 qp->sched_queue = orig_sched_queue;
3475 qp->param3 = orig_param3;
3476 qp->vlan_control = orig_vlan_control;
3477 qp->fvl_rx = orig_fvl_rx;
3478 qp->pri_path_fl = orig_pri_path_fl;
3479 qp->vlan_index = orig_vlan_index;
3480 qp->feup = orig_feup;
3482 put_res(dev, slave, qpn, RES_QP);
3486 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3487 struct mlx4_vhcr *vhcr,
3488 struct mlx4_cmd_mailbox *inbox,
3489 struct mlx4_cmd_mailbox *outbox,
3490 struct mlx4_cmd_info *cmd)
3493 struct mlx4_qp_context *context = inbox->buf + 8;
3495 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3499 update_pkey_index(dev, slave, inbox);
3500 update_gid(dev, inbox, (u8)slave);
3501 adjust_proxy_tun_qkey(dev, vhcr, context);
3502 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3505 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3506 struct mlx4_vhcr *vhcr,
3507 struct mlx4_cmd_mailbox *inbox,
3508 struct mlx4_cmd_mailbox *outbox,
3509 struct mlx4_cmd_info *cmd)
3512 struct mlx4_qp_context *context = inbox->buf + 8;
3514 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3518 update_pkey_index(dev, slave, inbox);
3519 update_gid(dev, inbox, (u8)slave);
3520 adjust_proxy_tun_qkey(dev, vhcr, context);
3521 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3525 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3526 struct mlx4_vhcr *vhcr,
3527 struct mlx4_cmd_mailbox *inbox,
3528 struct mlx4_cmd_mailbox *outbox,
3529 struct mlx4_cmd_info *cmd)
3531 struct mlx4_qp_context *context = inbox->buf + 8;
3532 adjust_proxy_tun_qkey(dev, vhcr, context);
3533 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3536 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3537 struct mlx4_vhcr *vhcr,
3538 struct mlx4_cmd_mailbox *inbox,
3539 struct mlx4_cmd_mailbox *outbox,
3540 struct mlx4_cmd_info *cmd)
3543 struct mlx4_qp_context *context = inbox->buf + 8;
3545 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3549 adjust_proxy_tun_qkey(dev, vhcr, context);
3550 update_gid(dev, inbox, (u8)slave);
3551 update_pkey_index(dev, slave, inbox);
3552 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3555 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3556 struct mlx4_vhcr *vhcr,
3557 struct mlx4_cmd_mailbox *inbox,
3558 struct mlx4_cmd_mailbox *outbox,
3559 struct mlx4_cmd_info *cmd)
3562 struct mlx4_qp_context *context = inbox->buf + 8;
3564 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3568 adjust_proxy_tun_qkey(dev, vhcr, context);
3569 update_gid(dev, inbox, (u8)slave);
3570 update_pkey_index(dev, slave, inbox);
3571 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3574 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3575 struct mlx4_vhcr *vhcr,
3576 struct mlx4_cmd_mailbox *inbox,
3577 struct mlx4_cmd_mailbox *outbox,
3578 struct mlx4_cmd_info *cmd)
3581 int qpn = vhcr->in_modifier & 0x7fffff;
3584 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3587 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3591 atomic_dec(&qp->mtt->ref_count);
3592 atomic_dec(&qp->rcq->ref_count);
3593 atomic_dec(&qp->scq->ref_count);
3595 atomic_dec(&qp->srq->ref_count);
3596 res_end_move(dev, slave, RES_QP, qpn);
3600 res_abort_move(dev, slave, RES_QP, qpn);
3605 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3606 struct res_qp *rqp, u8 *gid)
3608 struct res_gid *res;
3610 list_for_each_entry(res, &rqp->mcg_list, list) {
3611 if (!memcmp(res->gid, gid, 16))
3617 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3618 u8 *gid, enum mlx4_protocol prot,
3619 enum mlx4_steer_type steer, u64 reg_id)
3621 struct res_gid *res;
3624 res = kzalloc(sizeof(*res), GFP_KERNEL);
3628 spin_lock_irq(&rqp->mcg_spl);
3629 if (find_gid(dev, slave, rqp, gid)) {
3633 memcpy(res->gid, gid, 16);
3636 res->reg_id = reg_id;
3637 list_add_tail(&res->list, &rqp->mcg_list);
3640 spin_unlock_irq(&rqp->mcg_spl);
3645 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3646 u8 *gid, enum mlx4_protocol prot,
3647 enum mlx4_steer_type steer, u64 *reg_id)
3649 struct res_gid *res;
3652 spin_lock_irq(&rqp->mcg_spl);
3653 res = find_gid(dev, slave, rqp, gid);
3654 if (!res || res->prot != prot || res->steer != steer)
3657 *reg_id = res->reg_id;
3658 list_del(&res->list);
3662 spin_unlock_irq(&rqp->mcg_spl);
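/* qp_attach()/qp_detach() dispatch on the device steering mode:
 * device-managed flow steering (DMFS) returns a 64-bit reg_id that
 * identifies the rule and is the handle later given to
 * mlx4_flow_detach(), while B0 steering attaches the QP directly and
 * needs the gid/prot/type triplet again on detach.
 */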
3667 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3668 int block_loopback, enum mlx4_protocol prot,
3669 enum mlx4_steer_type type, u64 *reg_id)
3671 switch (dev->caps.steering_mode) {
3672 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3673 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3674 block_loopback, prot,
3676 case MLX4_STEERING_MODE_B0:
3677 return mlx4_qp_attach_common(dev, qp, gid,
3678 block_loopback, prot, type);
3684 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3685 enum mlx4_protocol prot, enum mlx4_steer_type type,
3688 switch (dev->caps.steering_mode) {
3689 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3690 return mlx4_flow_detach(dev, reg_id);
3691 case MLX4_STEERING_MODE_B0:
3692 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3698 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3699 struct mlx4_vhcr *vhcr,
3700 struct mlx4_cmd_mailbox *inbox,
3701 struct mlx4_cmd_mailbox *outbox,
3702 struct mlx4_cmd_info *cmd)
3704 struct mlx4_qp qp; /* dummy for calling attach/detach */
3705 u8 *gid = inbox->buf;
3706 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3711 int attach = vhcr->op_modifier;
3712 int block_loopback = vhcr->in_modifier >> 31;
3713 u8 steer_type_mask = 2;
3714 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3716 qpn = vhcr->in_modifier & 0xffffff;
3717 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3723 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3726 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3729 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3733 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3737 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3739 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3740 qpn, (unsigned long long)reg_id);
3742 put_res(dev, slave, qpn, RES_QP);
3746 qp_detach(dev, &qp, gid, prot, type, reg_id);
3748 put_res(dev, slave, qpn, RES_QP);
3753 * MAC validation for Flow Steering rules.
3754 * A VF can attach rules only with a MAC address that is assigned to it.
3756 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3757 struct list_head *rlist)
3759 struct mac_res *res, *tmp;
3762 /* make sure it isn't a multicast or broadcast MAC */
3763 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3764 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3765 list_for_each_entry_safe(res, tmp, rlist, list) {
3766 be_mac = cpu_to_be64(res->mac << 16);
3767 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3770 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3771 eth_header->eth.dst_mac, slave);
3778 * If the eth header is missing, prepend an eth header with a MAC address
3779 * assigned to the VF.
3781 static int add_eth_header(struct mlx4_dev *dev, int slave,
3782 struct mlx4_cmd_mailbox *inbox,
3783 struct list_head *rlist, int header_id)
3785 struct mac_res *res, *tmp;
3787 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3788 struct mlx4_net_trans_rule_hw_eth *eth_header;
3789 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3790 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3792 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3794 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3796 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3798 /* Make room in the inbox for the eth header */
3799 switch (header_id) {
3800 case MLX4_NET_TRANS_RULE_ID_IPV4:
3802 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3803 memmove(ip_header, eth_header,
3804 sizeof(*ip_header) + sizeof(*l4_header));
3806 case MLX4_NET_TRANS_RULE_ID_TCP:
3807 case MLX4_NET_TRANS_RULE_ID_UDP:
3808 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3810 memmove(l4_header, eth_header, sizeof(*l4_header));
3815 list_for_each_entry_safe(res, tmp, rlist, list) {
3816 if (port == res->port) {
3817 be_mac = cpu_to_be64(res->mac << 16);
3822 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3827 memset(eth_header, 0, sizeof(*eth_header));
3828 eth_header->size = sizeof(*eth_header) >> 2;
3829 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3830 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3831 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3837 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3838 struct mlx4_vhcr *vhcr,
3839 struct mlx4_cmd_mailbox *inbox,
3840 struct mlx4_cmd_mailbox *outbox,
3841 struct mlx4_cmd_info *cmd)
3844 struct mlx4_priv *priv = mlx4_priv(dev);
3845 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3846 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3850 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3851 struct _rule_hw *rule_header;
3854 if (dev->caps.steering_mode !=
3855 MLX4_STEERING_MODE_DEVICE_MANAGED)
3858 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3859 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3860 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3862 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3865 rule_header = (struct _rule_hw *)(ctrl + 1);
3866 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3868 switch (header_id) {
3869 case MLX4_NET_TRANS_RULE_ID_ETH:
3870 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3875 case MLX4_NET_TRANS_RULE_ID_IB:
3877 case MLX4_NET_TRANS_RULE_ID_IPV4:
3878 case MLX4_NET_TRANS_RULE_ID_TCP:
3879 case MLX4_NET_TRANS_RULE_ID_UDP:
3880 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3881 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3885 vhcr->in_modifier +=
3886 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3889 pr_err("Corrupted mailbox.\n");
3894 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3895 vhcr->in_modifier, 0,
3896 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3901 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3903 mlx4_err(dev, "Fail to add flow steering resources.\n ");
3905 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3906 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3910 atomic_inc(&rqp->ref_count);
3912 put_res(dev, slave, qpn, RES_QP);
3916 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3917 struct mlx4_vhcr *vhcr,
3918 struct mlx4_cmd_mailbox *inbox,
3919 struct mlx4_cmd_mailbox *outbox,
3920 struct mlx4_cmd_info *cmd)
3924 struct res_fs_rule *rrule;
3926 if (dev->caps.steering_mode !=
3927 MLX4_STEERING_MODE_DEVICE_MANAGED)
3930 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3933 /* Release the rule from busy state before removal */
3934 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3935 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3939 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3940 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3943 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE,
3945 atomic_dec(&rqp->ref_count);
3948 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
3954 put_res(dev, slave, rrule->qpn, RES_QP);
3959 BUSY_MAX_RETRIES = 10
3962 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3963 struct mlx4_vhcr *vhcr,
3964 struct mlx4_cmd_mailbox *inbox,
3965 struct mlx4_cmd_mailbox *outbox,
3966 struct mlx4_cmd_info *cmd)
3970 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3975 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3977 struct res_gid *rgid;
3978 struct res_gid *tmp;
3979 struct mlx4_qp qp; /* dummy for calling attach/detach */
3981 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3982 switch (dev->caps.steering_mode) {
3983 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3984 mlx4_flow_detach(dev, rgid->reg_id);
3986 case MLX4_STEERING_MODE_B0:
3987 qp.qpn = rqp->local_qpn;
3988 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3989 rgid->prot, rgid->steer);
3992 list_del(&rgid->list);
3997 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3998 enum mlx4_resource type, int print)
4000 struct mlx4_priv *priv = mlx4_priv(dev);
4001 struct mlx4_resource_tracker *tracker =
4002 &priv->mfunc.master.res_tracker;
4003 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4004 struct res_common *r;
4005 struct res_common *tmp;
4009 spin_lock_irq(mlx4_tlock(dev));
4010 list_for_each_entry_safe(r, tmp, rlist, list) {
4011 if (r->owner == slave) {
4013 if (r->state == RES_ANY_BUSY) {
4016 "%s id 0x%llx is busy\n",
4018 (unsigned long long)r->res_id);
4021 r->from_state = r->state;
4022 r->state = RES_ANY_BUSY;
4028 spin_unlock_irq(mlx4_tlock(dev));
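/* move_all_busy() retries _move_all_busy() for up to 5 seconds, since
 * a resource may transiently be busy while another command wrapper
 * holds it; only the final pass (print == 1) reports the stragglers.
 */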
4033 static int move_all_busy(struct mlx4_dev *dev, int slave,
4034 enum mlx4_resource type)
4036 unsigned long begin;
4041 busy = _move_all_busy(dev, slave, type, 0);
4042 if (time_after(jiffies, begin + 5 * HZ))
4049 busy = _move_all_busy(dev, slave, type, 1);
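/* The rem_slave_*() helpers below share one pattern: force the slave's
 * resources busy, then walk each resource backwards through its state
 * machine (e.g. RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED ->
 * freed), issuing the matching 2RST/HW2SW command while hardware still
 * owns the object and releasing ICM and ranges on the way down.
 */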
4053 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4055 struct mlx4_priv *priv = mlx4_priv(dev);
4056 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4057 struct list_head *qp_list =
4058 &tracker->slave_list[slave].res_list[RES_QP];
4066 err = move_all_busy(dev, slave, RES_QP);
4068 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4069 slave);
4071 spin_lock_irq(mlx4_tlock(dev));
4072 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4073 spin_unlock_irq(mlx4_tlock(dev));
4074 if (qp->com.owner == slave) {
4075 qpn = qp->com.res_id;
4076 detach_qp(dev, slave, qp);
4077 state = qp->com.from_state;
4078 while (state != 0) {
4080 case RES_QP_RESERVED:
4081 spin_lock_irq(mlx4_tlock(dev));
4082 rb_erase(&qp->com.node,
4083 &tracker->res_tree[RES_QP]);
4084 list_del(&qp->com.list);
4085 spin_unlock_irq(mlx4_tlock(dev));
4086 if (!valid_reserved(dev, slave, qpn)) {
4087 __mlx4_qp_release_range(dev, qpn, 1);
4088 mlx4_release_resource(dev, slave,
4095 if (!valid_reserved(dev, slave, qpn))
4096 __mlx4_qp_free_icm(dev, qpn);
4097 state = RES_QP_RESERVED;
4101 err = mlx4_cmd(dev, in_param,
4104 MLX4_CMD_TIME_CLASS_A,
4107 mlx4_dbg(dev, "rem_slave_qps: failed"
4108 " to move slave %d qpn %d to"
4111 atomic_dec(&qp->rcq->ref_count);
4112 atomic_dec(&qp->scq->ref_count);
4113 atomic_dec(&qp->mtt->ref_count);
4115 atomic_dec(&qp->srq->ref_count);
4116 state = RES_QP_MAPPED;
4123 spin_lock_irq(mlx4_tlock(dev));
4125 spin_unlock_irq(mlx4_tlock(dev));
4128 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4130 struct mlx4_priv *priv = mlx4_priv(dev);
4131 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4132 struct list_head *srq_list =
4133 &tracker->slave_list[slave].res_list[RES_SRQ];
4134 struct res_srq *srq;
4135 struct res_srq *tmp;
4142 err = move_all_busy(dev, slave, RES_SRQ);
4144 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
4145 slave);
4147 spin_lock_irq(mlx4_tlock(dev));
4148 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4149 spin_unlock_irq(mlx4_tlock(dev));
4150 if (srq->com.owner == slave) {
4151 srqn = srq->com.res_id;
4152 state = srq->com.from_state;
4153 while (state != 0) {
4155 case RES_SRQ_ALLOCATED:
4156 __mlx4_srq_free_icm(dev, srqn);
4157 spin_lock_irq(mlx4_tlock(dev));
4158 rb_erase(&srq->com.node,
4159 &tracker->res_tree[RES_SRQ]);
4160 list_del(&srq->com.list);
4161 spin_unlock_irq(mlx4_tlock(dev));
4162 mlx4_release_resource(dev, slave,
4170 err = mlx4_cmd(dev, in_param, srqn, 1,
4172 MLX4_CMD_TIME_CLASS_A,
4175 mlx4_dbg(dev, "rem_slave_srqs: failed"
4176 " to move slave %d srq %d to"
4180 atomic_dec(&srq->mtt->ref_count);
4182 atomic_dec(&srq->cq->ref_count);
4183 state = RES_SRQ_ALLOCATED;
4191 spin_lock_irq(mlx4_tlock(dev));
4193 spin_unlock_irq(mlx4_tlock(dev));
4196 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4198 struct mlx4_priv *priv = mlx4_priv(dev);
4199 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4200 struct list_head *cq_list =
4201 &tracker->slave_list[slave].res_list[RES_CQ];
4210 err = move_all_busy(dev, slave, RES_CQ);
4212 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
4213 slave);
4215 spin_lock_irq(mlx4_tlock(dev));
4216 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4217 spin_unlock_irq(mlx4_tlock(dev));
4218 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4219 cqn = cq->com.res_id;
4220 state = cq->com.from_state;
4221 while (state != 0) {
4223 case RES_CQ_ALLOCATED:
4224 __mlx4_cq_free_icm(dev, cqn);
4225 spin_lock_irq(mlx4_tlock(dev));
4226 rb_erase(&cq->com.node,
4227 &tracker->res_tree[RES_CQ]);
4228 list_del(&cq->com.list);
4229 spin_unlock_irq(mlx4_tlock(dev));
4230 mlx4_release_resource(dev, slave,
4238 err = mlx4_cmd(dev, in_param, cqn, 1,
4240 MLX4_CMD_TIME_CLASS_A,
4243 mlx4_dbg(dev, "rem_slave_cqs: failed"
4244 " to move slave %d cq %d to"
4247 atomic_dec(&cq->mtt->ref_count);
4248 state = RES_CQ_ALLOCATED;
4256 spin_lock_irq(mlx4_tlock(dev));
4258 spin_unlock_irq(mlx4_tlock(dev));
4261 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4263 struct mlx4_priv *priv = mlx4_priv(dev);
4264 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4265 struct list_head *mpt_list =
4266 &tracker->slave_list[slave].res_list[RES_MPT];
4267 struct res_mpt *mpt;
4268 struct res_mpt *tmp;
4275 err = move_all_busy(dev, slave, RES_MPT);
4277 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
4278 slave);
4280 spin_lock_irq(mlx4_tlock(dev));
4281 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4282 spin_unlock_irq(mlx4_tlock(dev));
4283 if (mpt->com.owner == slave) {
4284 mptn = mpt->com.res_id;
4285 state = mpt->com.from_state;
4286 while (state != 0) {
4288 case RES_MPT_RESERVED:
4289 __mlx4_mpt_release(dev, mpt->key);
4290 spin_lock_irq(mlx4_tlock(dev));
4291 rb_erase(&mpt->com.node,
4292 &tracker->res_tree[RES_MPT]);
4293 list_del(&mpt->com.list);
4294 spin_unlock_irq(mlx4_tlock(dev));
4295 mlx4_release_resource(dev, slave,
4301 case RES_MPT_MAPPED:
4302 __mlx4_mpt_free_icm(dev, mpt->key);
4303 state = RES_MPT_RESERVED;
4308 err = mlx4_cmd(dev, in_param, mptn, 0,
4310 MLX4_CMD_TIME_CLASS_A,
4313 mlx4_dbg(dev, "rem_slave_mrs: failed"
4314 " to move slave %d mpt %d to"
4318 atomic_dec(&mpt->mtt->ref_count);
4319 state = RES_MPT_MAPPED;
4326 spin_lock_irq(mlx4_tlock(dev));
4328 spin_unlock_irq(mlx4_tlock(dev));
4331 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4333 struct mlx4_priv *priv = mlx4_priv(dev);
4334 struct mlx4_resource_tracker *tracker =
4335 &priv->mfunc.master.res_tracker;
4336 struct list_head *mtt_list =
4337 &tracker->slave_list[slave].res_list[RES_MTT];
4338 struct res_mtt *mtt;
4339 struct res_mtt *tmp;
4345 err = move_all_busy(dev, slave, RES_MTT);
4347 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
4348 slave);
4350 spin_lock_irq(mlx4_tlock(dev));
4351 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4352 spin_unlock_irq(mlx4_tlock(dev));
4353 if (mtt->com.owner == slave) {
4354 base = mtt->com.res_id;
4355 state = mtt->com.from_state;
4356 while (state != 0) {
4358 case RES_MTT_ALLOCATED:
4359 __mlx4_free_mtt_range(dev, base,
4361 spin_lock_irq(mlx4_tlock(dev));
4362 rb_erase(&mtt->com.node,
4363 &tracker->res_tree[RES_MTT]);
4364 list_del(&mtt->com.list);
4365 spin_unlock_irq(mlx4_tlock(dev));
4366 mlx4_release_resource(dev, slave, RES_MTT,
4367 1 << mtt->order, 0);
4377 spin_lock_irq(mlx4_tlock(dev));
4379 spin_unlock_irq(mlx4_tlock(dev));
4382 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4384 struct mlx4_priv *priv = mlx4_priv(dev);
4385 struct mlx4_resource_tracker *tracker =
4386 &priv->mfunc.master.res_tracker;
4387 struct list_head *fs_rule_list =
4388 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4389 struct res_fs_rule *fs_rule;
4390 struct res_fs_rule *tmp;
4395 err = move_all_busy(dev, slave, RES_FS_RULE);
4397 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4400 spin_lock_irq(mlx4_tlock(dev));
4401 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4402 spin_unlock_irq(mlx4_tlock(dev));
4403 if (fs_rule->com.owner == slave) {
4404 base = fs_rule->com.res_id;
4405 state = fs_rule->com.from_state;
4406 while (state != 0) {
4408 case RES_FS_RULE_ALLOCATED:
4410 err = mlx4_cmd(dev, base, 0, 0,
4411 MLX4_QP_FLOW_STEERING_DETACH,
4412 MLX4_CMD_TIME_CLASS_A,
4415 spin_lock_irq(mlx4_tlock(dev));
4416 rb_erase(&fs_rule->com.node,
4417 &tracker->res_tree[RES_FS_RULE]);
4418 list_del(&fs_rule->com.list);
4419 spin_unlock_irq(mlx4_tlock(dev));
4429 spin_lock_irq(mlx4_tlock(dev));
4431 spin_unlock_irq(mlx4_tlock(dev));
4434 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4436 struct mlx4_priv *priv = mlx4_priv(dev);
4437 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4438 struct list_head *eq_list =
4439 &tracker->slave_list[slave].res_list[RES_EQ];
4446 struct mlx4_cmd_mailbox *mailbox;
4448 err = move_all_busy(dev, slave, RES_EQ);
4450 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
4451 slave);
4453 spin_lock_irq(mlx4_tlock(dev));
4454 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4455 spin_unlock_irq(mlx4_tlock(dev));
4456 if (eq->com.owner == slave) {
4457 eqn = eq->com.res_id;
4458 state = eq->com.from_state;
4459 while (state != 0) {
4461 case RES_EQ_RESERVED:
4462 spin_lock_irq(mlx4_tlock(dev));
4463 rb_erase(&eq->com.node,
4464 &tracker->res_tree[RES_EQ]);
4465 list_del(&eq->com.list);
4466 spin_unlock_irq(mlx4_tlock(dev));
4472 mailbox = mlx4_alloc_cmd_mailbox(dev);
4473 if (IS_ERR(mailbox)) {
4477 err = mlx4_cmd_box(dev, slave, 0,
4480 MLX4_CMD_TIME_CLASS_A,
4483 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4485 slave, eqn);
4486 mlx4_free_cmd_mailbox(dev, mailbox);
4487 atomic_dec(&eq->mtt->ref_count);
4488 state = RES_EQ_RESERVED;
4496 spin_lock_irq(mlx4_tlock(dev));
4498 spin_unlock_irq(mlx4_tlock(dev));
4501 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4503 __mlx4_slave_counters_free(dev, slave);
4506 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4508 struct mlx4_priv *priv = mlx4_priv(dev);
4509 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4510 struct list_head *xrcdn_list =
4511 &tracker->slave_list[slave].res_list[RES_XRCD];
4512 struct res_xrcdn *xrcd;
4513 struct res_xrcdn *tmp;
4517 err = move_all_busy(dev, slave, RES_XRCD);
4519 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
4520 slave);
4522 spin_lock_irq(mlx4_tlock(dev));
4523 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4524 if (xrcd->com.owner == slave) {
4525 xrcdn = xrcd->com.res_id;
4526 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4527 list_del(&xrcd->com.list);
4529 __mlx4_xrcd_free(dev, xrcdn);
4532 spin_unlock_irq(mlx4_tlock(dev));
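/* Teardown order below matters: flow steering rules (which hold
 * references on QPs) go before the QPs, and QPs (which hold references
 * on their CQs, SRQs and MTTs) go before those, so that every
 * reference count has dropped to zero by the time its object is
 * freed. MACs and VLANs are released first; counters and XRC domains
 * have no such dependencies and go last.
 */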
4535 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4537 struct mlx4_priv *priv = mlx4_priv(dev);
4539 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4540 rem_slave_macs(dev, slave);
4541 rem_slave_vlans(dev, slave);
4542 rem_slave_fs_rule(dev, slave);
4543 rem_slave_qps(dev, slave);
4544 rem_slave_srqs(dev, slave);
4545 rem_slave_cqs(dev, slave);
4546 rem_slave_mrs(dev, slave);
4547 rem_slave_eqs(dev, slave);
4548 rem_slave_mtts(dev, slave);
4549 rem_slave_counters(dev, slave);
4550 rem_slave_xrcdns(dev, slave);
4551 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
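/* Deferred work that rewrites the primary path of every eligible QP
 * owned by a VF when its VLAN (VST) or QoS setting changes at runtime.
 * For MLX4_VGT the values saved at INIT2RTR time are restored; for VST
 * the new vlan index, QoS bits and vlan-control policy are forced in
 * via UPDATE_QP. RSS QPs, reserved QPs and QPs that have not yet gone
 * through INIT2RTR are skipped.
 */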
4554 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4556 struct mlx4_vf_immed_vlan_work *work =
4557 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4558 struct mlx4_cmd_mailbox *mailbox;
4559 struct mlx4_update_qp_context *upd_context;
4560 struct mlx4_dev *dev = &work->priv->dev;
4561 struct mlx4_resource_tracker *tracker =
4562 &work->priv->mfunc.master.res_tracker;
4563 struct list_head *qp_list =
4564 &tracker->slave_list[work->slave].res_list[RES_QP];
4567 u64 qp_path_mask_vlan_ctrl =
4568 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4569 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4570 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4571 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4572 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4573 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4575 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4576 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4577 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4578 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4579 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4580 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4581 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4584 int port, errors = 0;
4587 if (mlx4_is_slave(dev)) {
4588 mlx4_warn(dev, "Trying to update QP in slave %d\n",
4593 mailbox = mlx4_alloc_cmd_mailbox(dev);
4594 if (IS_ERR(mailbox))
4598 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4599 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4601 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4602 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4603 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4605 upd_context = mailbox->buf;
4606 upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4608 spin_lock_irq(mlx4_tlock(dev));
4609 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4610 spin_unlock_irq(mlx4_tlock(dev));
4611 if (qp->com.owner == work->slave) {
4612 if (qp->com.from_state != RES_QP_HW ||
4613 !qp->sched_queue || /* no INIT2RTR trans yet */
4614 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4615 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4616 spin_lock_irq(mlx4_tlock(dev));
4619 port = (qp->sched_queue >> 6 & 1) + 1;
4620 if (port != work->port) {
4621 spin_lock_irq(mlx4_tlock(dev));
4624 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4625 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4627 upd_context->primary_addr_path_mask =
4628 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4629 if (work->vlan_id == MLX4_VGT) {
4630 upd_context->qp_context.param3 = qp->param3;
4631 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4632 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4633 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4634 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4635 upd_context->qp_context.pri_path.feup = qp->feup;
4636 upd_context->qp_context.pri_path.sched_queue =
4639 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4640 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4641 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4642 upd_context->qp_context.pri_path.fvl_rx =
4643 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4644 upd_context->qp_context.pri_path.fl =
4645 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4646 upd_context->qp_context.pri_path.feup =
4647 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4648 upd_context->qp_context.pri_path.sched_queue =
4649 qp->sched_queue & 0xC7;
4650 upd_context->qp_context.pri_path.sched_queue |=
4651 ((work->qos & 0x7) << 3);
4654 err = mlx4_cmd(dev, mailbox->dma,
4655 qp->local_qpn & 0xffffff,
4656 0, MLX4_CMD_UPDATE_QP,
4657 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4659 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4661 work->slave, port, qp->local_qpn,
4666 spin_lock_irq(mlx4_tlock(dev));
4668 spin_unlock_irq(mlx4_tlock(dev));
4669 mlx4_free_cmd_mailbox(dev, mailbox);
4672 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4673 errors, work->slave, work->port);
4675 /* unregister the previous vlan_id, if needed, provided there were
4676 * no errors while updating the QPs
4678 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4679 NO_INDX != work->orig_vlan_ix)
4680 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4681 work->orig_vlan_id);