/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.
 *     All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};
enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	void			*mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
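
/*
 * Editor's note (illustration, not upstream code): every tracked object
 * embeds a struct res_common, keyed by res_id in a per-type red-black
 * tree.  A lookup such as
 *
 *	struct res_common *r = res_tracker_lookup(root, qpn);
 *
 * is O(log n) over all tracked instances of one resource type, while the
 * per-slave res_list threaded through res_common.list gives cheap
 * iteration over everything a given VF owns.
 */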
/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
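
/*
 * Editor's note (worked example with assumed numbers): with
 * quota[slave] = 625, guaranteed[slave] = 125, allocated = 100 and
 * count = 50, the request passes the quota check (150 <= 625).  Since
 * 150 > 125, only guaranteed - allocated = 25 entries come from the
 * reserved area (from_rsvd = 25) and from_free = 25 must come from the
 * shared pool, which is granted only while free - from_free >= reserved.
 */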
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
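
/*
 * Editor's note (worked example with assumed numbers): with num_vfs = 3
 * and num_instances = 1000, each of the 4 functions gets
 * guaranteed = 1000 / (2 * 4) = 125 and quota = 500 + 125 = 625.
 * Half of the instances are thus hard-guaranteed (4 * 125); the other
 * half is a shared pool that any function may consume up to its quota.
 */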
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}
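
/*
 * Editor's note (worked example with assumed numbers): for a 2-port HCA
 * with max_counters = 128, the pool left for VF guarantees is
 * 128 - 1 (sink) - 2 * 2 (PF) = 123, so 123 / 2 = 61 VFs can each be
 * guaranteed MLX4_VF_COUNTERS_PER_PORT counters per port; higher-indexed
 * VFs fall back to best-effort allocation from the free pool.
 */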
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* the master function */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (1 /*vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE*/ &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw see it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
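
/*
 * Editor's note (illustration): get_res()/put_res() implement a simple
 * busy-bit protocol.  get_res() saves the current state in from_state
 * and parks the resource in RES_ANY_BUSY so no concurrent command can
 * move it; put_res() restores from_state.  A typical caller does:
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... use mpt safely ...
 *	put_res(dev, slave, id, RES_MPT);
 */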
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}
static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index  = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
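
/*
 * Editor's note (illustration): add_res_range() is two-phase.  All
 * tracker entries are allocated outside the lock first; insertion into
 * the rb-tree and the slave's list then happens under mlx4_tlock(), with
 * the undo: label rolling back partial insertions so the tree and the
 * per-slave list never disagree about a range.
 */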
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_qp *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, (unsigned long long)r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 (unsigned long long)r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
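
/*
 * Editor's note (illustration): QP tracking follows the firmware
 * lifecycle RES_QP_RESERVED -> RES_QP_MAPPED -> RES_QP_HW.  A mover
 * first parks the entry in RES_QP_BUSY (recording from_state/to_state),
 * then commits with res_end_move() or rolls back with res_abort_move(),
 * so a failed FW command leaves the tracker in the prior state.
 */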
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_mpt *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_eq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err;
	struct res_cq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_srq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
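
/*
 * Editor's note (illustration): allocation mirrors the two FW steps.
 * RES_OP_RESERVE grants quota and reserves a QPN range; RES_OP_MAP_ICM
 * then maps ICM for one QPN and moves it to RES_QP_MAPPED.  qp_free_res()
 * further below undoes the same two steps in reverse order.
 */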
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
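
/*
 * Editor's note (illustration): for ALLOC_RES/FREE_RES the VHCR
 * in_modifier is decoded as: bits 0-7 = resource type (RES_QP, RES_MAC,
 * ...), bits 8-15 = port for the port-scoped MAC/VLAN resources;
 * op_modifier carries the RES_OP_* sub-operation.
 */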
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	if (index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
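
/*
 * Editor's note (worked example with assumed numbers): WQ sizes are
 * decoded as 1 << (log_size + log_stride + 4).  With log_sq_size = 6,
 * log_sq_stride = 2, an SRQ-attached RQ (rq_size = 0), page_shift = 12
 * and page_offset = 0: total_mem = 4096 and
 * roundup_pow_of_two(4096 >> 12) = 1 MTT page is required.
 */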
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
2816 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2817 struct mlx4_vhcr *vhcr,
2818 struct mlx4_cmd_mailbox *inbox,
2819 struct mlx4_cmd_mailbox *outbox,
2820 struct mlx4_cmd_info *cmd)
2823 int index = vhcr->in_modifier;
2824 struct res_mpt *mpt;
2827 id = index & mpt_mask(dev);
2828 err = get_res(dev, slave, id, RES_MPT, &mpt);
2832 if (mpt->com.from_state == RES_MPT_MAPPED) {
2833 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2834 * that, the VF must read the MPT. But since the MPT entry memory is not
2835 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2836 * entry contents. To guarantee that the MPT cannot be changed, the driver
2837 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2838 * ownership following the change. The change here allows the VF to
2839 * perform QUERY_MPT also when the entry is in SW ownership.
2841 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2842 &mlx4_priv(dev)->mr_table.dmpt_table,
2845 if (NULL == mpt_entry || NULL == outbox->buf) {
2850 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2853 } else if (mpt->com.from_state == RES_MPT_HW) {
2854 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2862 put_res(dev, slave, id, RES_MPT);
2866 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2868 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2871 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2873 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2876 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2878 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2881 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2882 struct mlx4_qp_context *context)
2884 u32 qpn = vhcr->in_modifier & 0xffffff;
2887 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2890 /* adjust qkey in qp context */
2891 context->qkey = cpu_to_be32(qkey);
2894 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2895 struct mlx4_qp_context *qpc,
2896 struct mlx4_cmd_mailbox *inbox);
2898 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2899 struct mlx4_vhcr *vhcr,
2900 struct mlx4_cmd_mailbox *inbox,
2901 struct mlx4_cmd_mailbox *outbox,
2902 struct mlx4_cmd_info *cmd)
2905 int qpn = vhcr->in_modifier & 0x7fffff;
2906 struct res_mtt *mtt;
2908 struct mlx4_qp_context *qpc = inbox->buf + 8;
2909 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2910 int mtt_size = qp_get_mtt_size(qpc);
2913 int rcqn = qp_get_rcqn(qpc);
2914 int scqn = qp_get_scqn(qpc);
2915 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2916 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2917 struct res_srq *srq;
2918 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2920 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2924 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2927 qp->local_qpn = local_qpn;
2928 qp->sched_queue = 0;
2930 qp->vlan_control = 0;
2932 qp->pri_path_fl = 0;
2935 qp->qpc_flags = be32_to_cpu(qpc->flags);
2937 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2941 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2945 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2950 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2957 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2962 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2963 update_pkey_index(dev, slave, inbox);
2964 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2967 atomic_inc(&mtt->ref_count);
2969 atomic_inc(&rcq->ref_count);
2971 atomic_inc(&scq->ref_count);
2975 put_res(dev, slave, scqn, RES_CQ);
2978 atomic_inc(&srq->ref_count);
2979 put_res(dev, slave, srqn, RES_SRQ);
2982 put_res(dev, slave, rcqn, RES_CQ);
2983 put_res(dev, slave, mtt_base, RES_MTT);
2984 res_end_move(dev, slave, RES_QP, qpn);
2990 put_res(dev, slave, srqn, RES_SRQ);
2993 put_res(dev, slave, scqn, RES_CQ);
2995 put_res(dev, slave, rcqn, RES_CQ);
2997 put_res(dev, slave, mtt_base, RES_MTT);
2999 res_abort_move(dev, slave, RES_QP, qpn);
3004 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3006 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3009 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3011 int log_eq_size = eqc->log_eq_size & 0x1f;
3012 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3014 if (log_eq_size + 5 < page_shift)
3017 return 1 << (log_eq_size + 5 - page_shift);
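/* Worked example (illustrative numbers): an EQE is 32 bytes, hence the
 * "+ 5".  With log_eq_size = 10 (1024 entries) and 4 KB pages
 * (page_shift = 12), the EQ spans 1 << (10 + 5 - 12) = 8 pages, i.e.
 * 8 MTT entries; an EQ that fits inside a single page is handled by the
 * check above.
 */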
3020 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3022 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3025 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3027 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3028 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3030 if (log_cq_size + 5 < page_shift)
3033 return 1 << (log_cq_size + 5 - page_shift);
3036 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3037 struct mlx4_vhcr *vhcr,
3038 struct mlx4_cmd_mailbox *inbox,
3039 struct mlx4_cmd_mailbox *outbox,
3040 struct mlx4_cmd_info *cmd)
3043 int eqn = vhcr->in_modifier;
3044 int res_id = (slave << 10) | eqn;
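/* EQs are tracked under a composite id: the slave number above bit 10
 * and the slave's own EQ number in the low 10 bits, so EQ 0 of
 * different slaves does not collide in the resource tracker.
 */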
3045 struct mlx4_eq_context *eqc = inbox->buf;
3046 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3047 int mtt_size = eq_get_mtt_size(eqc);
3049 struct res_mtt *mtt;
3051 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3054 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3058 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3062 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3066 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3070 atomic_inc(&mtt->ref_count);
3072 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3073 res_end_move(dev, slave, RES_EQ, res_id);
3077 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3079 res_abort_move(dev, slave, RES_EQ, res_id);
3081 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3085 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3086 struct mlx4_vhcr *vhcr,
3087 struct mlx4_cmd_mailbox *inbox,
3088 struct mlx4_cmd_mailbox *outbox,
3089 struct mlx4_cmd_info *cmd)
3092 u8 get = vhcr->op_modifier;
3097 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3102 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3103 int len, struct res_mtt **res)
3105 struct mlx4_priv *priv = mlx4_priv(dev);
3106 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3107 struct res_mtt *mtt;
3110 spin_lock_irq(mlx4_tlock(dev));
3111 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3113 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3115 mtt->com.from_state = mtt->com.state;
3116 mtt->com.state = RES_MTT_BUSY;
3121 spin_unlock_irq(mlx4_tlock(dev));
3126 static int verify_qp_parameters(struct mlx4_dev *dev,
3127 struct mlx4_vhcr *vhcr,
3128 struct mlx4_cmd_mailbox *inbox,
3129 enum qp_transition transition, u8 slave)
3133 struct mlx4_qp_context *qp_ctx;
3134 enum mlx4_qp_optpar optpar;
3138 qp_ctx = inbox->buf + 8;
3139 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3140 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3142 if (slave != mlx4_master_func_num(dev)) {
3143 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3144 /* setting QP rate-limit is disallowed for VFs */
3145 if (qp_ctx->rate_limit_params)
3151 case MLX4_QP_ST_XRC:
3153 switch (transition) {
3154 case QP_TRANS_INIT2RTR:
3155 case QP_TRANS_RTR2RTS:
3156 case QP_TRANS_RTS2RTS:
3157 case QP_TRANS_SQD2SQD:
3158 case QP_TRANS_SQD2RTS:
3159 if (slave != mlx4_master_func_num(dev)) {
3160 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3161 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3162 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3163 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3166 if (qp_ctx->pri_path.mgid_index >= num_gids)
3169 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3170 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3171 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3172 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3175 if (qp_ctx->alt_path.mgid_index >= num_gids)
3185 case MLX4_QP_ST_MLX:
3186 qpn = vhcr->in_modifier & 0x7fffff;
3187 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3188 if (transition == QP_TRANS_INIT2RTR &&
3189 slave != mlx4_master_func_num(dev) &&
3190 mlx4_is_qp_reserved(dev, qpn) &&
3191 !mlx4_vf_smi_enabled(dev, slave, port)) {
3192 /* only enabled VFs may create MLX proxy QPs */
3193 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3194 __func__, slave, port);
3206 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3207 struct mlx4_vhcr *vhcr,
3208 struct mlx4_cmd_mailbox *inbox,
3209 struct mlx4_cmd_mailbox *outbox,
3210 struct mlx4_cmd_info *cmd)
3212 struct mlx4_mtt mtt;
3213 __be64 *page_list = inbox->buf;
3214 u64 *pg_list = (u64 *)page_list;
3216 struct res_mtt *rmtt = NULL;
3217 int start = be64_to_cpu(page_list[0]);
3218 int npages = vhcr->in_modifier;
3221 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3225 /* Call the SW implementation of write_mtt:
3226 * - Prepare a dummy mtt struct
3227 * - Translate inbox contents to simple addresses in host endianness */
3228 mtt.offset = 0; /* TBD: offset handling is broken, but it is
3229 unused here, so we don't handle it */
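/* Inbox layout, as consumed below: entries 0 and 1 hold the 64-bit
 * start index (hence the "+ 2" offsets), and bit 0 of every page entry
 * is the hardware "present" flag, cleared here since the SW write path
 * sets it again when programming the MTT entries.
 */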
3232 for (i = 0; i < npages; ++i)
3233 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3235 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3236 ((u64 *)page_list + 2));
3239 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3244 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3245 struct mlx4_vhcr *vhcr,
3246 struct mlx4_cmd_mailbox *inbox,
3247 struct mlx4_cmd_mailbox *outbox,
3248 struct mlx4_cmd_info *cmd)
3250 int eqn = vhcr->in_modifier;
3251 int res_id = eqn | (slave << 10);
3255 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3259 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3263 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3267 atomic_dec(&eq->mtt->ref_count);
3268 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3269 res_end_move(dev, slave, RES_EQ, res_id);
3270 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3275 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3277 res_abort_move(dev, slave, RES_EQ, res_id);
3282 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3284 struct mlx4_priv *priv = mlx4_priv(dev);
3285 struct mlx4_slave_event_eq_info *event_eq;
3286 struct mlx4_cmd_mailbox *mailbox;
3287 u32 in_modifier = 0;
3292 if (!priv->mfunc.master.slave_state)
3295 /* check that the slave is valid, is not the PF, and is active */
3296 if (slave < 0 || slave > dev->persist->num_vfs ||
3297 slave == dev->caps.function ||
3298 !priv->mfunc.master.slave_state[slave].active)
3301 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3303 /* Create the event only if the slave is registered */
3304 if (event_eq->eqn < 0)
3307 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3308 res_id = (slave << 10) | event_eq->eqn;
3309 err = get_res(dev, slave, res_id, RES_EQ, &req);
3313 if (req->com.from_state != RES_EQ_HW) {
3318 mailbox = mlx4_alloc_cmd_mailbox(dev);
3319 if (IS_ERR(mailbox)) {
3320 err = PTR_ERR(mailbox);
3324 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3326 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3329 memcpy(mailbox->buf, (u8 *) eqe, 28);
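/* GEN_EQE takes the destination slave in bits 7:0 of the input
 * modifier and the slave's EQ number in bits 25:16; the 28-byte EQE
 * body itself travels in the mailbox.
 */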
3331 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3333 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3334 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3337 put_res(dev, slave, res_id, RES_EQ);
3338 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3339 mlx4_free_cmd_mailbox(dev, mailbox);
3343 put_res(dev, slave, res_id, RES_EQ);
3346 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3350 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3351 struct mlx4_vhcr *vhcr,
3352 struct mlx4_cmd_mailbox *inbox,
3353 struct mlx4_cmd_mailbox *outbox,
3354 struct mlx4_cmd_info *cmd)
3356 int eqn = vhcr->in_modifier;
3357 int res_id = eqn | (slave << 10);
3361 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3365 if (eq->com.from_state != RES_EQ_HW) {
3370 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3373 put_res(dev, slave, res_id, RES_EQ);
3377 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3378 struct mlx4_vhcr *vhcr,
3379 struct mlx4_cmd_mailbox *inbox,
3380 struct mlx4_cmd_mailbox *outbox,
3381 struct mlx4_cmd_info *cmd)
3384 int cqn = vhcr->in_modifier;
3385 struct mlx4_cq_context *cqc = inbox->buf;
3386 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3387 struct res_cq *cq = NULL;
3388 struct res_mtt *mtt;
3390 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3393 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3396 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3399 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3402 atomic_inc(&mtt->ref_count);
3404 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3405 res_end_move(dev, slave, RES_CQ, cqn);
3409 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3411 res_abort_move(dev, slave, RES_CQ, cqn);
3415 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3416 struct mlx4_vhcr *vhcr,
3417 struct mlx4_cmd_mailbox *inbox,
3418 struct mlx4_cmd_mailbox *outbox,
3419 struct mlx4_cmd_info *cmd)
3422 int cqn = vhcr->in_modifier;
3423 struct res_cq *cq = NULL;
3425 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3428 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3431 atomic_dec(&cq->mtt->ref_count);
3432 res_end_move(dev, slave, RES_CQ, cqn);
3436 res_abort_move(dev, slave, RES_CQ, cqn);
3440 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3441 struct mlx4_vhcr *vhcr,
3442 struct mlx4_cmd_mailbox *inbox,
3443 struct mlx4_cmd_mailbox *outbox,
3444 struct mlx4_cmd_info *cmd)
3446 int cqn = vhcr->in_modifier;
3450 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3454 if (cq->com.from_state != RES_CQ_HW)
3457 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3459 put_res(dev, slave, cqn, RES_CQ);
3464 static int handle_resize(struct mlx4_dev *dev, int slave,
3465 struct mlx4_vhcr *vhcr,
3466 struct mlx4_cmd_mailbox *inbox,
3467 struct mlx4_cmd_mailbox *outbox,
3468 struct mlx4_cmd_info *cmd,
3472 struct res_mtt *orig_mtt;
3473 struct res_mtt *mtt;
3474 struct mlx4_cq_context *cqc = inbox->buf;
3475 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3477 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3481 if (orig_mtt != cq->mtt) {
3486 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3490 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3493 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3496 atomic_dec(&orig_mtt->ref_count);
3497 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3498 atomic_inc(&mtt->ref_count);
3500 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3504 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3506 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3512 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3513 struct mlx4_vhcr *vhcr,
3514 struct mlx4_cmd_mailbox *inbox,
3515 struct mlx4_cmd_mailbox *outbox,
3516 struct mlx4_cmd_info *cmd)
3518 int cqn = vhcr->in_modifier;
3522 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3526 if (cq->com.from_state != RES_CQ_HW)
3529 if (vhcr->op_modifier == 0) {
3530 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3534 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3536 put_res(dev, slave, cqn, RES_CQ);
3541 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3543 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3544 int log_rq_stride = srqc->logstride & 7;
3545 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3547 if (log_srq_size + log_rq_stride + 4 < page_shift)
3550 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3553 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3554 struct mlx4_vhcr *vhcr,
3555 struct mlx4_cmd_mailbox *inbox,
3556 struct mlx4_cmd_mailbox *outbox,
3557 struct mlx4_cmd_info *cmd)
3560 int srqn = vhcr->in_modifier;
3561 struct res_mtt *mtt;
3562 struct res_srq *srq = NULL;
3563 struct mlx4_srq_context *srqc = inbox->buf;
3564 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3566 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3569 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3572 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3575 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3580 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3584 atomic_inc(&mtt->ref_count);
3586 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3587 res_end_move(dev, slave, RES_SRQ, srqn);
3591 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3593 res_abort_move(dev, slave, RES_SRQ, srqn);
3598 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3599 struct mlx4_vhcr *vhcr,
3600 struct mlx4_cmd_mailbox *inbox,
3601 struct mlx4_cmd_mailbox *outbox,
3602 struct mlx4_cmd_info *cmd)
3605 int srqn = vhcr->in_modifier;
3606 struct res_srq *srq = NULL;
3608 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3611 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3614 atomic_dec(&srq->mtt->ref_count);
3616 atomic_dec(&srq->cq->ref_count);
3617 res_end_move(dev, slave, RES_SRQ, srqn);
3622 res_abort_move(dev, slave, RES_SRQ, srqn);
3627 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3628 struct mlx4_vhcr *vhcr,
3629 struct mlx4_cmd_mailbox *inbox,
3630 struct mlx4_cmd_mailbox *outbox,
3631 struct mlx4_cmd_info *cmd)
3634 int srqn = vhcr->in_modifier;
3635 struct res_srq *srq;
3637 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3640 if (srq->com.from_state != RES_SRQ_HW) {
3644 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3646 put_res(dev, slave, srqn, RES_SRQ);
3650 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3651 struct mlx4_vhcr *vhcr,
3652 struct mlx4_cmd_mailbox *inbox,
3653 struct mlx4_cmd_mailbox *outbox,
3654 struct mlx4_cmd_info *cmd)
3657 int srqn = vhcr->in_modifier;
3658 struct res_srq *srq;
3660 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3664 if (srq->com.from_state != RES_SRQ_HW) {
3669 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3671 put_res(dev, slave, srqn, RES_SRQ);
3675 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3676 struct mlx4_vhcr *vhcr,
3677 struct mlx4_cmd_mailbox *inbox,
3678 struct mlx4_cmd_mailbox *outbox,
3679 struct mlx4_cmd_info *cmd)
3682 int qpn = vhcr->in_modifier & 0x7fffff;
3685 err = get_res(dev, slave, qpn, RES_QP, &qp);
3688 if (qp->com.from_state != RES_QP_HW) {
3693 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3695 put_res(dev, slave, qpn, RES_QP);
3699 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3700 struct mlx4_vhcr *vhcr,
3701 struct mlx4_cmd_mailbox *inbox,
3702 struct mlx4_cmd_mailbox *outbox,
3703 struct mlx4_cmd_info *cmd)
3705 struct mlx4_qp_context *context = inbox->buf + 8;
3706 adjust_proxy_tun_qkey(dev, vhcr, context);
3707 update_pkey_index(dev, slave, inbox);
3708 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3711 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3712 struct mlx4_qp_context *qpc,
3713 struct mlx4_cmd_mailbox *inbox)
3715 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3717 int port = mlx4_slave_convert_port(
3718 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
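/* Bit 6 of sched_queue selects the physical port (0-based); translate
 * the port the VF believes it is using into the real one, e.g. a
 * single-port VF pinned to physical port 2 still sees port 1.
 */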
3723 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3726 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3727 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3728 qpc->pri_path.sched_queue = pri_sched_queue;
3731 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3732 port = mlx4_slave_convert_port(
3733 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3737 qpc->alt_path.sched_queue =
3738 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3744 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3745 struct mlx4_qp_context *qpc,
3746 struct mlx4_cmd_mailbox *inbox)
3750 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3751 u8 sched = *(u8 *)(inbox->buf + 64);
3754 port = (sched >> 6 & 1) + 1;
3755 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3756 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3757 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3763 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3764 struct mlx4_vhcr *vhcr,
3765 struct mlx4_cmd_mailbox *inbox,
3766 struct mlx4_cmd_mailbox *outbox,
3767 struct mlx4_cmd_info *cmd)
3770 struct mlx4_qp_context *qpc = inbox->buf + 8;
3771 int qpn = vhcr->in_modifier & 0x7fffff;
3773 u8 orig_sched_queue;
3774 __be32 orig_param3 = qpc->param3;
3775 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3776 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3777 u8 orig_pri_path_fl = qpc->pri_path.fl;
3778 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3779 u8 orig_feup = qpc->pri_path.feup;
3781 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3784 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3788 if (roce_verify_mac(dev, slave, qpc, inbox))
3791 update_pkey_index(dev, slave, inbox);
3792 update_gid(dev, inbox, (u8)slave);
3793 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3794 orig_sched_queue = qpc->pri_path.sched_queue;
3796 err = get_res(dev, slave, qpn, RES_QP, &qp);
3799 if (qp->com.from_state != RES_QP_HW) {
3804 err = update_vport_qp_param(dev, inbox, slave, qpn);
3808 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3810 /* On success, save the sched_queue value passed in by the VF. This is
3811 * essentially the QoS value the VF provided, and will be needed if we
3812 * ever allow dynamic changes from VST back to VGT
3815 qp->sched_queue = orig_sched_queue;
3816 qp->param3 = orig_param3;
3817 qp->vlan_control = orig_vlan_control;
3818 qp->fvl_rx = orig_fvl_rx;
3819 qp->pri_path_fl = orig_pri_path_fl;
3820 qp->vlan_index = orig_vlan_index;
3821 qp->feup = orig_feup;
3823 put_res(dev, slave, qpn, RES_QP);
3827 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3828 struct mlx4_vhcr *vhcr,
3829 struct mlx4_cmd_mailbox *inbox,
3830 struct mlx4_cmd_mailbox *outbox,
3831 struct mlx4_cmd_info *cmd)
3834 struct mlx4_qp_context *context = inbox->buf + 8;
3836 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3839 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3843 update_pkey_index(dev, slave, inbox);
3844 update_gid(dev, inbox, (u8)slave);
3845 adjust_proxy_tun_qkey(dev, vhcr, context);
3846 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3849 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3850 struct mlx4_vhcr *vhcr,
3851 struct mlx4_cmd_mailbox *inbox,
3852 struct mlx4_cmd_mailbox *outbox,
3853 struct mlx4_cmd_info *cmd)
3856 struct mlx4_qp_context *context = inbox->buf + 8;
3858 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3861 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3865 update_pkey_index(dev, slave, inbox);
3866 update_gid(dev, inbox, (u8)slave);
3867 adjust_proxy_tun_qkey(dev, vhcr, context);
3868 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3872 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3873 struct mlx4_vhcr *vhcr,
3874 struct mlx4_cmd_mailbox *inbox,
3875 struct mlx4_cmd_mailbox *outbox,
3876 struct mlx4_cmd_info *cmd)
3878 struct mlx4_qp_context *context = inbox->buf + 8;
3879 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3882 adjust_proxy_tun_qkey(dev, vhcr, context);
3883 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3886 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3887 struct mlx4_vhcr *vhcr,
3888 struct mlx4_cmd_mailbox *inbox,
3889 struct mlx4_cmd_mailbox *outbox,
3890 struct mlx4_cmd_info *cmd)
3893 struct mlx4_qp_context *context = inbox->buf + 8;
3895 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3898 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3902 adjust_proxy_tun_qkey(dev, vhcr, context);
3903 update_gid(dev, inbox, (u8)slave);
3904 update_pkey_index(dev, slave, inbox);
3905 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3908 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3909 struct mlx4_vhcr *vhcr,
3910 struct mlx4_cmd_mailbox *inbox,
3911 struct mlx4_cmd_mailbox *outbox,
3912 struct mlx4_cmd_info *cmd)
3915 struct mlx4_qp_context *context = inbox->buf + 8;
3917 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3920 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3924 adjust_proxy_tun_qkey(dev, vhcr, context);
3925 update_gid(dev, inbox, (u8)slave);
3926 update_pkey_index(dev, slave, inbox);
3927 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3930 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3931 struct mlx4_vhcr *vhcr,
3932 struct mlx4_cmd_mailbox *inbox,
3933 struct mlx4_cmd_mailbox *outbox,
3934 struct mlx4_cmd_info *cmd)
3937 int qpn = vhcr->in_modifier & 0x7fffff;
3940 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3943 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3947 atomic_dec(&qp->mtt->ref_count);
3948 atomic_dec(&qp->rcq->ref_count);
3949 atomic_dec(&qp->scq->ref_count);
3951 atomic_dec(&qp->srq->ref_count);
3952 res_end_move(dev, slave, RES_QP, qpn);
3956 res_abort_move(dev, slave, RES_QP, qpn);
3961 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3962 struct res_qp *rqp, u8 *gid)
3964 struct res_gid *res;
3966 list_for_each_entry(res, &rqp->mcg_list, list) {
3967 if (!memcmp(res->gid, gid, 16))
3973 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3974 u8 *gid, enum mlx4_protocol prot,
3975 enum mlx4_steer_type steer, u64 reg_id)
3977 struct res_gid *res;
3980 res = kzalloc(sizeof *res, GFP_KERNEL);
3984 spin_lock_irq(&rqp->mcg_spl);
3985 if (find_gid(dev, slave, rqp, gid)) {
3989 memcpy(res->gid, gid, 16);
3992 res->reg_id = reg_id;
3993 list_add_tail(&res->list, &rqp->mcg_list);
3996 spin_unlock_irq(&rqp->mcg_spl);
4001 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4002 u8 *gid, enum mlx4_protocol prot,
4003 enum mlx4_steer_type steer, u64 *reg_id)
4005 struct res_gid *res;
4008 spin_lock_irq(&rqp->mcg_spl);
4009 res = find_gid(dev, slave, rqp, gid);
4010 if (!res || res->prot != prot || res->steer != steer)
4013 *reg_id = res->reg_id;
4014 list_del(&res->list);
4018 spin_unlock_irq(&rqp->mcg_spl);
4023 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4024 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4025 enum mlx4_steer_type type, u64 *reg_id)
4027 switch (dev->caps.steering_mode) {
4028 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4029 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4032 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4033 block_loopback, prot,
4036 case MLX4_STEERING_MODE_B0:
4037 if (prot == MLX4_PROT_ETH) {
4038 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4043 return mlx4_qp_attach_common(dev, qp, gid,
4044 block_loopback, prot, type);
4050 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4051 u8 gid[16], enum mlx4_protocol prot,
4052 enum mlx4_steer_type type, u64 reg_id)
4054 switch (dev->caps.steering_mode) {
4055 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4056 return mlx4_flow_detach(dev, reg_id);
4057 case MLX4_STEERING_MODE_B0:
4058 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4064 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4065 u8 *gid, enum mlx4_protocol prot)
4069 if (prot != MLX4_PROT_ETH)
4072 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4073 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4074 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4083 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4084 struct mlx4_vhcr *vhcr,
4085 struct mlx4_cmd_mailbox *inbox,
4086 struct mlx4_cmd_mailbox *outbox,
4087 struct mlx4_cmd_info *cmd)
4089 struct mlx4_qp qp; /* dummy for calling attach/detach */
4090 u8 *gid = inbox->buf;
4091 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4096 int attach = vhcr->op_modifier;
4097 int block_loopback = vhcr->in_modifier >> 31;
4098 u8 steer_type_mask = 2;
4099 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
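/* Decoding used here: the input modifier packs the QP number in bits
 * 23:0, the protocol in bits 30:28 and the block-loopback flag in bit
 * 31; within the gid, byte 5 carries the port and bit 1 of byte 7 the
 * steering type.
 */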
4101 qpn = vhcr->in_modifier & 0xffffff;
4102 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4108 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4111 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4114 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4118 err = mlx4_adjust_port(dev, slave, gid, prot);
4122 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4126 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4128 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4129 qpn, (unsigned long long)reg_id);
4131 put_res(dev, slave, qpn, RES_QP);
4135 qp_detach(dev, &qp, gid, prot, type, reg_id);
4137 put_res(dev, slave, qpn, RES_QP);
4142 * MAC validation for Flow Steering rules.
4143 * A VF may attach rules only with a MAC address that is assigned to it.
4145 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4146 struct list_head *rlist)
4148 struct mac_res *res, *tmp;
4151 /* make sure it isn't a multicast or broadcast MAC */
4152 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4153 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4154 list_for_each_entry_safe(res, tmp, rlist, list) {
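/* res->mac keeps the address in the low 48 bits of a u64; shifting it
 * left by 16 and converting to big-endian puts the six address bytes
 * at the start of be_mac, where ether_addr_equal() expects them.
 */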
4155 be_mac = cpu_to_be64(res->mac << 16);
4156 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4159 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4160 eth_header->eth.dst_mac, slave);
4166 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4167 struct _rule_hw *eth_header)
4169 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4170 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4171 struct mlx4_net_trans_rule_hw_eth *eth =
4172 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4173 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4174 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4175 next_rule->rsvd == 0;
4178 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4183 * If the eth header is missing, append an eth header with a MAC address
4184 * assigned to the VF.
4186 static int add_eth_header(struct mlx4_dev *dev, int slave,
4187 struct mlx4_cmd_mailbox *inbox,
4188 struct list_head *rlist, int header_id)
4190 struct mac_res *res, *tmp;
4192 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4193 struct mlx4_net_trans_rule_hw_eth *eth_header;
4194 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4195 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4197 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4199 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4201 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4203 /* Clear a space in the inbox for eth header */
4204 switch (header_id) {
4205 case MLX4_NET_TRANS_RULE_ID_IPV4:
4207 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4208 memmove(ip_header, eth_header,
4209 sizeof(*ip_header) + sizeof(*l4_header));
4211 case MLX4_NET_TRANS_RULE_ID_TCP:
4212 case MLX4_NET_TRANS_RULE_ID_UDP:
4213 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4215 memmove(l4_header, eth_header, sizeof(*l4_header));
4220 list_for_each_entry_safe(res, tmp, rlist, list) {
4221 if (port == res->port) {
4222 be_mac = cpu_to_be64(res->mac << 16);
4227 pr_err("Failed adding eth header to FS rule: can't find a matching MAC for port %d\n",
4232 memset(eth_header, 0, sizeof(*eth_header));
4233 eth_header->size = sizeof(*eth_header) >> 2;
4234 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4235 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4236 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4241 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4242 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4243 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
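/* Only the MAC index and the source-check-loopback bit of the primary
 * address path may be updated through this wrapper; any other bit in
 * the masks is rejected below.
 */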
4244 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4245 struct mlx4_vhcr *vhcr,
4246 struct mlx4_cmd_mailbox *inbox,
4247 struct mlx4_cmd_mailbox *outbox,
4248 struct mlx4_cmd_info *cmd_info)
4251 u32 qpn = vhcr->in_modifier & 0xffffff;
4255 u64 pri_addr_path_mask;
4256 struct mlx4_update_qp_context *cmd;
4259 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4261 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4262 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4263 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4266 if ((pri_addr_path_mask &
4267 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4268 !(dev->caps.flags2 &
4269 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4270 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4275 /* Just change the smac for the QP */
4276 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4278 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4282 port = (rqp->sched_queue >> 6 & 1) + 1;
4284 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4285 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4286 err = mac_find_smac_ix_in_slave(dev, slave, port,
4290 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4296 err = mlx4_cmd(dev, inbox->dma,
4297 vhcr->in_modifier, 0,
4298 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4301 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4306 put_res(dev, slave, qpn, RES_QP);
4310 static u32 qp_attach_mbox_size(void *mbox)
4312 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4313 struct _rule_hw *rule_header;
4315 rule_header = (struct _rule_hw *)(mbox + size);
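/* The attach mailbox is one ctrl segment followed by variable-size
 * rule segments; each header's size field counts dwords and a zero
 * size terminates the list, so this walk yields the total mailbox
 * size.
 */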
4317 while (rule_header->size) {
4318 size += rule_header->size * sizeof(u32);
4324 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4326 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4327 struct mlx4_vhcr *vhcr,
4328 struct mlx4_cmd_mailbox *inbox,
4329 struct mlx4_cmd_mailbox *outbox,
4330 struct mlx4_cmd_info *cmd)
4333 struct mlx4_priv *priv = mlx4_priv(dev);
4334 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4335 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4339 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4340 struct _rule_hw *rule_header;
4342 struct res_fs_rule *rrule;
4345 if (dev->caps.steering_mode !=
4346 MLX4_STEERING_MODE_DEVICE_MANAGED)
4349 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4350 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4354 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4355 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4357 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4360 rule_header = (struct _rule_hw *)(ctrl + 1);
4361 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4363 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4364 handle_eth_header_mcast_prio(ctrl, rule_header);
4366 if (slave == dev->caps.function)
4369 switch (header_id) {
4370 case MLX4_NET_TRANS_RULE_ID_ETH:
4371 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4376 case MLX4_NET_TRANS_RULE_ID_IB:
4378 case MLX4_NET_TRANS_RULE_ID_IPV4:
4379 case MLX4_NET_TRANS_RULE_ID_TCP:
4380 case MLX4_NET_TRANS_RULE_ID_UDP:
4381 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4382 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4386 vhcr->in_modifier +=
4387 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4390 pr_err("Corrupted mailbox\n");
4396 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4397 vhcr->in_modifier, 0,
4398 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4404 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4406 mlx4_err(dev, "Failed to add flow steering resources\n");
4410 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4414 mbox_size = qp_attach_mbox_size(inbox->buf);
4415 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4416 if (!rrule->mirr_mbox) {
4420 rrule->mirr_mbox_size = mbox_size;
4421 rrule->mirr_rule_id = 0;
4422 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4424 /* flip the port for the mirror copy of the rule */
4425 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4426 if (ctrl->port == 1)
4431 if (mlx4_is_bonded(dev))
4432 mlx4_do_mirror_rule(dev, rrule);
4434 atomic_inc(&rqp->ref_count);
4437 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4439 /* detach rule on error */
4441 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4442 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4445 put_res(dev, slave, qpn, RES_QP);
4449 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4453 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4455 mlx4_err(dev, "Failed to remove flow steering resources\n");
4459 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4460 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4464 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4465 struct mlx4_vhcr *vhcr,
4466 struct mlx4_cmd_mailbox *inbox,
4467 struct mlx4_cmd_mailbox *outbox,
4468 struct mlx4_cmd_info *cmd)
4472 struct res_fs_rule *rrule;
4475 if (dev->caps.steering_mode !=
4476 MLX4_STEERING_MODE_DEVICE_MANAGED)
4479 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4483 if (!rrule->mirr_mbox) {
4484 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4485 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4488 mirr_reg_id = rrule->mirr_rule_id;
4489 kfree(rrule->mirr_mbox);
4491 /* Release the rule from busy state before removal */
4492 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4493 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4497 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4498 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4500 mlx4_err(dev, "Failed to get resource of mirror rule\n");
4502 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4503 mlx4_undo_mirror_rule(dev, rrule);
4506 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4508 mlx4_err(dev, "Failed to remove flow steering resources\n");
4512 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4513 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4516 atomic_dec(&rqp->ref_count);
4518 put_res(dev, slave, rrule->qpn, RES_QP);
4523 BUSY_MAX_RETRIES = 10
4526 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4527 struct mlx4_vhcr *vhcr,
4528 struct mlx4_cmd_mailbox *inbox,
4529 struct mlx4_cmd_mailbox *outbox,
4530 struct mlx4_cmd_info *cmd)
4533 int index = vhcr->in_modifier & 0xffff;
4535 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4539 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4540 put_res(dev, slave, index, RES_COUNTER);
4544 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4546 struct res_gid *rgid;
4547 struct res_gid *tmp;
4548 struct mlx4_qp qp; /* dummy for calling attach/detach */
4550 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4551 switch (dev->caps.steering_mode) {
4552 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4553 mlx4_flow_detach(dev, rgid->reg_id);
4555 case MLX4_STEERING_MODE_B0:
4556 qp.qpn = rqp->local_qpn;
4557 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4558 rgid->prot, rgid->steer);
4561 list_del(&rgid->list);
4566 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4567 enum mlx4_resource type, int print)
4569 struct mlx4_priv *priv = mlx4_priv(dev);
4570 struct mlx4_resource_tracker *tracker =
4571 &priv->mfunc.master.res_tracker;
4572 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4573 struct res_common *r;
4574 struct res_common *tmp;
4578 spin_lock_irq(mlx4_tlock(dev));
4579 list_for_each_entry_safe(r, tmp, rlist, list) {
4580 if (r->owner == slave) {
4582 if (r->state == RES_ANY_BUSY) {
4585 "%s id 0x%llx is busy\n",
4587 (long long)r->res_id);
4590 r->from_state = r->state;
4591 r->state = RES_ANY_BUSY;
4597 spin_unlock_irq(mlx4_tlock(dev));
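/*
 * move_all_busy() below polls _move_all_busy() quietly for up to five
 * seconds, giving in-flight commands a chance to release their
 * resources; if anything is still busy after that, a final pass runs
 * with printing enabled so the stuck resources are logged.
 */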
4602 static int move_all_busy(struct mlx4_dev *dev, int slave,
4603 enum mlx4_resource type)
4605 unsigned long begin;
4610 busy = _move_all_busy(dev, slave, type, 0);
4611 if (time_after(jiffies, begin + 5 * HZ))
4618 busy = _move_all_busy(dev, slave, type, 1);
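/*
 * The rem_slave_*() helpers below share one pattern: force every
 * resource the slave owns into the busy state, then walk each
 * resource's state machine backwards (e.g. RES_QP_HW -> RES_QP_MAPPED
 * -> RES_QP_RESERVED), undoing one step per iteration with the
 * matching HW2SW command, ICM free or range release.
 */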
4622 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4624 struct mlx4_priv *priv = mlx4_priv(dev);
4625 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4626 struct list_head *qp_list =
4627 &tracker->slave_list[slave].res_list[RES_QP];
4635 err = move_all_busy(dev, slave, RES_QP);
4637 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4640 spin_lock_irq(mlx4_tlock(dev));
4641 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4642 spin_unlock_irq(mlx4_tlock(dev));
4643 if (qp->com.owner == slave) {
4644 qpn = qp->com.res_id;
4645 detach_qp(dev, slave, qp);
4646 state = qp->com.from_state;
4647 while (state != 0) {
4649 case RES_QP_RESERVED:
4650 spin_lock_irq(mlx4_tlock(dev));
4651 rb_erase(&qp->com.node,
4652 &tracker->res_tree[RES_QP]);
4653 list_del(&qp->com.list);
4654 spin_unlock_irq(mlx4_tlock(dev));
4655 if (!valid_reserved(dev, slave, qpn)) {
4656 __mlx4_qp_release_range(dev, qpn, 1);
4657 mlx4_release_resource(dev, slave,
4664 if (!valid_reserved(dev, slave, qpn))
4665 __mlx4_qp_free_icm(dev, qpn);
4666 state = RES_QP_RESERVED;
4670 err = mlx4_cmd(dev, in_param,
4673 MLX4_CMD_TIME_CLASS_A,
4676 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4677 slave, qp->local_qpn);
4678 atomic_dec(&qp->rcq->ref_count);
4679 atomic_dec(&qp->scq->ref_count);
4680 atomic_dec(&qp->mtt->ref_count);
4682 atomic_dec(&qp->srq->ref_count);
4683 state = RES_QP_MAPPED;
4690 spin_lock_irq(mlx4_tlock(dev));
4692 spin_unlock_irq(mlx4_tlock(dev));
4695 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4697 struct mlx4_priv *priv = mlx4_priv(dev);
4698 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4699 struct list_head *srq_list =
4700 &tracker->slave_list[slave].res_list[RES_SRQ];
4701 struct res_srq *srq;
4702 struct res_srq *tmp;
4709 err = move_all_busy(dev, slave, RES_SRQ);
4711 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4714 spin_lock_irq(mlx4_tlock(dev));
4715 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4716 spin_unlock_irq(mlx4_tlock(dev));
4717 if (srq->com.owner == slave) {
4718 srqn = srq->com.res_id;
4719 state = srq->com.from_state;
4720 while (state != 0) {
4722 case RES_SRQ_ALLOCATED:
4723 __mlx4_srq_free_icm(dev, srqn);
4724 spin_lock_irq(mlx4_tlock(dev));
4725 rb_erase(&srq->com.node,
4726 &tracker->res_tree[RES_SRQ]);
4727 list_del(&srq->com.list);
4728 spin_unlock_irq(mlx4_tlock(dev));
4729 mlx4_release_resource(dev, slave,
4737 err = mlx4_cmd(dev, in_param, srqn, 1,
4739 MLX4_CMD_TIME_CLASS_A,
4742 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4745 atomic_dec(&srq->mtt->ref_count);
4747 atomic_dec(&srq->cq->ref_count);
4748 state = RES_SRQ_ALLOCATED;
4756 spin_lock_irq(mlx4_tlock(dev));
4758 spin_unlock_irq(mlx4_tlock(dev));
4761 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4763 struct mlx4_priv *priv = mlx4_priv(dev);
4764 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4765 struct list_head *cq_list =
4766 &tracker->slave_list[slave].res_list[RES_CQ];
4775 err = move_all_busy(dev, slave, RES_CQ);
4777 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4780 spin_lock_irq(mlx4_tlock(dev));
4781 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4782 spin_unlock_irq(mlx4_tlock(dev));
4783 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4784 cqn = cq->com.res_id;
4785 state = cq->com.from_state;
4786 while (state != 0) {
4788 case RES_CQ_ALLOCATED:
4789 __mlx4_cq_free_icm(dev, cqn);
4790 spin_lock_irq(mlx4_tlock(dev));
4791 rb_erase(&cq->com.node,
4792 &tracker->res_tree[RES_CQ]);
4793 list_del(&cq->com.list);
4794 spin_unlock_irq(mlx4_tlock(dev));
4795 mlx4_release_resource(dev, slave,
4803 err = mlx4_cmd(dev, in_param, cqn, 1,
4805 MLX4_CMD_TIME_CLASS_A,
4808 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4810 atomic_dec(&cq->mtt->ref_count);
4811 state = RES_CQ_ALLOCATED;
4819 spin_lock_irq(mlx4_tlock(dev));
4821 spin_unlock_irq(mlx4_tlock(dev));
4824 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4826 struct mlx4_priv *priv = mlx4_priv(dev);
4827 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4828 struct list_head *mpt_list =
4829 &tracker->slave_list[slave].res_list[RES_MPT];
4830 struct res_mpt *mpt;
4831 struct res_mpt *tmp;
4838 err = move_all_busy(dev, slave, RES_MPT);
4840 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4843 spin_lock_irq(mlx4_tlock(dev));
4844 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4845 spin_unlock_irq(mlx4_tlock(dev));
4846 if (mpt->com.owner == slave) {
4847 mptn = mpt->com.res_id;
4848 state = mpt->com.from_state;
4849 while (state != 0) {
4851 case RES_MPT_RESERVED:
4852 __mlx4_mpt_release(dev, mpt->key);
4853 spin_lock_irq(mlx4_tlock(dev));
4854 rb_erase(&mpt->com.node,
4855 &tracker->res_tree[RES_MPT]);
4856 list_del(&mpt->com.list);
4857 spin_unlock_irq(mlx4_tlock(dev));
4858 mlx4_release_resource(dev, slave,
4864 case RES_MPT_MAPPED:
4865 __mlx4_mpt_free_icm(dev, mpt->key);
4866 state = RES_MPT_RESERVED;
4871 err = mlx4_cmd(dev, in_param, mptn, 0,
4873 MLX4_CMD_TIME_CLASS_A,
4876 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4879 atomic_dec(&mpt->mtt->ref_count);
4880 state = RES_MPT_MAPPED;
4887 spin_lock_irq(mlx4_tlock(dev));
4889 spin_unlock_irq(mlx4_tlock(dev));
4892 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4894 struct mlx4_priv *priv = mlx4_priv(dev);
4895 struct mlx4_resource_tracker *tracker =
4896 &priv->mfunc.master.res_tracker;
4897 struct list_head *mtt_list =
4898 &tracker->slave_list[slave].res_list[RES_MTT];
4899 struct res_mtt *mtt;
4900 struct res_mtt *tmp;
4906 err = move_all_busy(dev, slave, RES_MTT);
4908 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4911 spin_lock_irq(mlx4_tlock(dev));
4912 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4913 spin_unlock_irq(mlx4_tlock(dev));
4914 if (mtt->com.owner == slave) {
4915 base = mtt->com.res_id;
4916 state = mtt->com.from_state;
4917 while (state != 0) {
4919 case RES_MTT_ALLOCATED:
4920 __mlx4_free_mtt_range(dev, base,
4922 spin_lock_irq(mlx4_tlock(dev));
4923 rb_erase(&mtt->com.node,
4924 &tracker->res_tree[RES_MTT]);
4925 list_del(&mtt->com.list);
4926 spin_unlock_irq(mlx4_tlock(dev));
4927 mlx4_release_resource(dev, slave, RES_MTT,
4928 1 << mtt->order, 0);
4938 spin_lock_irq(mlx4_tlock(dev));
4940 spin_unlock_irq(mlx4_tlock(dev));
4943 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4945 struct mlx4_cmd_mailbox *mailbox;
4947 struct res_fs_rule *mirr_rule;
4950 mailbox = mlx4_alloc_cmd_mailbox(dev);
4951 if (IS_ERR(mailbox))
4952 return PTR_ERR(mailbox);
4954 if (!fs_rule->mirr_mbox) {
4955 mlx4_err(dev, "rule mirroring mailbox is null\n");
4958 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4959 err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4960 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4962 mlx4_free_cmd_mailbox(dev, mailbox);
4967 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4971 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4975 fs_rule->mirr_rule_id = reg_id;
4976 mirr_rule->mirr_rule_id = 0;
4977 mirr_rule->mirr_mbox_size = 0;
4978 mirr_rule->mirr_mbox = NULL;
4979 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4983 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4985 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4986 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4991 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4993 struct mlx4_priv *priv = mlx4_priv(dev);
4994 struct mlx4_resource_tracker *tracker =
4995 &priv->mfunc.master.res_tracker;
4996 struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4998 struct res_fs_rule *fs_rule;
5000 LIST_HEAD(mirr_list);
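/* A native rule keeps its saved attach mailbox (mirr_mbox_size != 0)
 * while a mirror rule does not; so when bonding, collect the native
 * rules in order to mirror them, and when unbonding, collect the
 * mirrors in order to undo them.
 */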
5002 for (p = rb_first(root); p; p = rb_next(p)) {
5003 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5004 if ((bond && fs_rule->mirr_mbox_size) ||
5005 (!bond && !fs_rule->mirr_mbox_size))
5006 list_add_tail(&fs_rule->mirr_list, &mirr_list);
5009 list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5011 err += mlx4_do_mirror_rule(dev, fs_rule);
5013 err += mlx4_undo_mirror_rule(dev, fs_rule);
5018 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5020 return mlx4_mirror_fs_rules(dev, true);
5023 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5025 return mlx4_mirror_fs_rules(dev, false);
5028 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5030 struct mlx4_priv *priv = mlx4_priv(dev);
5031 struct mlx4_resource_tracker *tracker =
5032 &priv->mfunc.master.res_tracker;
5033 struct list_head *fs_rule_list =
5034 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5035 struct res_fs_rule *fs_rule;
5036 struct res_fs_rule *tmp;
5041 err = move_all_busy(dev, slave, RES_FS_RULE);
5043 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5046 spin_lock_irq(mlx4_tlock(dev));
5047 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5048 spin_unlock_irq(mlx4_tlock(dev));
5049 if (fs_rule->com.owner == slave) {
5050 base = fs_rule->com.res_id;
5051 state = fs_rule->com.from_state;
5052 while (state != 0) {
5054 case RES_FS_RULE_ALLOCATED:
5056 err = mlx4_cmd(dev, base, 0, 0,
5057 MLX4_QP_FLOW_STEERING_DETACH,
5058 MLX4_CMD_TIME_CLASS_A,
5061 spin_lock_irq(mlx4_tlock(dev));
5062 rb_erase(&fs_rule->com.node,
5063 &tracker->res_tree[RES_FS_RULE]);
5064 list_del(&fs_rule->com.list);
5065 spin_unlock_irq(mlx4_tlock(dev));
5075 spin_lock_irq(mlx4_tlock(dev));
5077 spin_unlock_irq(mlx4_tlock(dev));
5080 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5082 struct mlx4_priv *priv = mlx4_priv(dev);
5083 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5084 struct list_head *eq_list =
5085 &tracker->slave_list[slave].res_list[RES_EQ];
5093 err = move_all_busy(dev, slave, RES_EQ);
5095 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5098 spin_lock_irq(mlx4_tlock(dev));
5099 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5100 spin_unlock_irq(mlx4_tlock(dev));
5101 if (eq->com.owner == slave) {
5102 eqn = eq->com.res_id;
5103 state = eq->com.from_state;
5104 while (state != 0) {
5106 case RES_EQ_RESERVED:
5107 spin_lock_irq(mlx4_tlock(dev));
5108 rb_erase(&eq->com.node,
5109 &tracker->res_tree[RES_EQ]);
5110 list_del(&eq->com.list);
5111 spin_unlock_irq(mlx4_tlock(dev));
5117 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5118 1, MLX4_CMD_HW2SW_EQ,
5119 MLX4_CMD_TIME_CLASS_A,
5122 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5123 slave, eqn & 0x3ff);
5124 atomic_dec(&eq->mtt->ref_count);
5125 state = RES_EQ_RESERVED;
5133 spin_lock_irq(mlx4_tlock(dev));
5135 spin_unlock_irq(mlx4_tlock(dev));
5138 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5140 struct mlx4_priv *priv = mlx4_priv(dev);
5141 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5142 struct list_head *counter_list =
5143 &tracker->slave_list[slave].res_list[RES_COUNTER];
5144 struct res_counter *counter;
5145 struct res_counter *tmp;
5147 int *counters_arr = NULL;
5150 err = move_all_busy(dev, slave, RES_COUNTER);
5152 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5155 counters_arr = kmalloc_array(dev->caps.max_counters,
5156 sizeof(*counters_arr), GFP_KERNEL);
5163 spin_lock_irq(mlx4_tlock(dev));
5164 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5165 if (counter->com.owner == slave) {
5166 counters_arr[i++] = counter->com.res_id;
5167 rb_erase(&counter->com.node,
5168 &tracker->res_tree[RES_COUNTER]);
5169 list_del(&counter->com.list);
5173 spin_unlock_irq(mlx4_tlock(dev));
5176 __mlx4_counter_free(dev, counters_arr[j++]);
5177 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5181 kfree(counters_arr);
5184 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5186 struct mlx4_priv *priv = mlx4_priv(dev);
5187 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5188 struct list_head *xrcdn_list =
5189 &tracker->slave_list[slave].res_list[RES_XRCD];
5190 struct res_xrcdn *xrcd;
5191 struct res_xrcdn *tmp;
5195 err = move_all_busy(dev, slave, RES_XRCD);
5197 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5200 spin_lock_irq(mlx4_tlock(dev));
5201 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5202 if (xrcd->com.owner == slave) {
5203 xrcdn = xrcd->com.res_id;
5204 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5205 list_del(&xrcd->com.list);
5207 __mlx4_xrcd_free(dev, xrcdn);
5210 spin_unlock_irq(mlx4_tlock(dev));
5213 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5215 struct mlx4_priv *priv = mlx4_priv(dev);
5216 mlx4_reset_roce_gids(dev, slave);
5217 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5218 rem_slave_vlans(dev, slave);
5219 rem_slave_macs(dev, slave);
5220 rem_slave_fs_rule(dev, slave);
5221 rem_slave_qps(dev, slave);
5222 rem_slave_srqs(dev, slave);
5223 rem_slave_cqs(dev, slave);
5224 rem_slave_mrs(dev, slave);
5225 rem_slave_eqs(dev, slave);
5226 rem_slave_mtts(dev, slave);
5227 rem_slave_counters(dev, slave);
5228 rem_slave_xrcdns(dev, slave);
5229 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5232 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5234 struct mlx4_vf_immed_vlan_work *work =
5235 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5236 struct mlx4_cmd_mailbox *mailbox;
5237 struct mlx4_update_qp_context *upd_context;
5238 struct mlx4_dev *dev = &work->priv->dev;
5239 struct mlx4_resource_tracker *tracker =
5240 &work->priv->mfunc.master.res_tracker;
5241 struct list_head *qp_list =
5242 &tracker->slave_list[work->slave].res_list[RES_QP];
5245 u64 qp_path_mask_vlan_ctrl =
5246 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5247 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5248 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5249 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5250 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5251 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5253 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5254 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5255 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5256 (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5257 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5258 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5259 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5260 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5263 int port, errors = 0;
5266 if (mlx4_is_slave(dev)) {
5267 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5272 mailbox = mlx4_alloc_cmd_mailbox(dev);
5273 if (IS_ERR(mailbox))
5275 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5276 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5277 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5278 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5279 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5280 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5281 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5282 else if (!work->vlan_id)
5283 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5284 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5285 else if (work->vlan_proto == htons(ETH_P_8021AD))
5286 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5287 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5288 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5289 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5290 else /* vst 802.1Q */
5291 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5292 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5293 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
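/* vlan_control now encodes the VST policy chosen above: everything is
 * blocked when the link is disabled; for vlan 0 only tagged traffic is
 * blocked; otherwise guest-tagged TX is blocked (the HW inserts the
 * configured tag) and RX admits only frames carrying such a tag.
 */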
5295 upd_context = mailbox->buf;
5296 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5298 spin_lock_irq(mlx4_tlock(dev));
5299 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5300 spin_unlock_irq(mlx4_tlock(dev));
5301 if (qp->com.owner == work->slave) {
5302 if (qp->com.from_state != RES_QP_HW ||
5303 !qp->sched_queue || /* no INIT2RTR trans yet */
5304 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5305 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5306 spin_lock_irq(mlx4_tlock(dev));
5309 port = (qp->sched_queue >> 6 & 1) + 1;
5310 if (port != work->port) {
5311 spin_lock_irq(mlx4_tlock(dev));
5314 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5315 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5317 upd_context->primary_addr_path_mask =
5318 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5319 if (work->vlan_id == MLX4_VGT) {
5320 upd_context->qp_context.param3 = qp->param3;
5321 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5322 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5323 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5324 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5325 upd_context->qp_context.pri_path.feup = qp->feup;
5326 upd_context->qp_context.pri_path.sched_queue =
5327 qp->sched_queue;
5328 } else {
5329 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5330 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5331 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5332 upd_context->qp_context.pri_path.fvl_rx =
5333 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5334 upd_context->qp_context.pri_path.fl =
5335 qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5336 if (work->vlan_proto == htons(ETH_P_8021AD))
5337 upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5339 upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5340 upd_context->qp_context.pri_path.feup =
5341 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5342 upd_context->qp_context.pri_path.sched_queue =
5343 qp->sched_queue & 0xC7;
5344 upd_context->qp_context.pri_path.sched_queue |=
5345 ((work->qos & 0x7) << 3);
5346 upd_context->qp_mask |=
5347 cpu_to_be64(1ULL <<
5348 MLX4_UPD_QP_MASK_QOS_VPP);
5349 upd_context->qp_context.qos_vport =
5350 work->qos_vport;
5353 err = mlx4_cmd(dev, mailbox->dma,
5354 qp->local_qpn & 0xffffff,
5355 0, MLX4_CMD_UPDATE_QP,
5356 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5358 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5359 work->slave, port, qp->local_qpn, err);
5363 spin_lock_irq(mlx4_tlock(dev));
5365 spin_unlock_irq(mlx4_tlock(dev));
5366 mlx4_free_cmd_mailbox(dev, mailbox);
5369 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5370 errors, work->slave, work->port);
5372 /* unregister the previous vlan_id if needed, provided we had no
5373 * errors while updating the QPs
5375 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5376 NO_INDX != work->orig_vlan_ix)
5377 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5378 work->orig_vlan_id);