1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/compat.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
50 #define MLX4_MAC_VALID          (1ull << 63)
51
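/*
 * Per-slave bookkeeping entries for MAC and VLAN registrations; they live
 * on the owning slave's resource list so they can be released when the
 * slave goes away.  ref_count counts repeated registrations of the same
 * address or VLAN on a port.
 */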
52 struct mac_res {
53         struct list_head list;
54         u64 mac;
55         int ref_count;
56         u8 smac_index;
57         u8 port;
58 };
59
60 struct vlan_res {
61         struct list_head list;
62         u16 vlan;
63         int ref_count;
64         int vlan_index;
65         u8 port;
66 };
67
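/*
 * Common header embedded in every tracked resource.  res_id keys the
 * per-type red-black tree, owner is the slave (function) owning the
 * resource, and state/from_state/to_state implement the "mark busy,
 * then commit or abort" protocol used by the state-transition helpers
 * further down in this file.
 */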
68 struct res_common {
69         struct list_head        list;
70         struct rb_node          node;
71         u64                     res_id;
72         int                     owner;
73         int                     state;
74         int                     from_state;
75         int                     to_state;
76         int                     removing;
77 };
78
79 enum {
80         RES_ANY_BUSY = 1
81 };
82
83 struct res_gid {
84         struct list_head        list;
85         u8                      gid[16];
86         enum mlx4_protocol      prot;
87         enum mlx4_steer_type    steer;
88 };
89
90 enum res_qp_states {
91         RES_QP_BUSY = RES_ANY_BUSY,
92
93         /* QP number was allocated */
94         RES_QP_RESERVED,
95
96         /* ICM memory for QP context was mapped */
97         RES_QP_MAPPED,
98
99         /* QP is in hw ownership */
100         RES_QP_HW
101 };
102
103 struct res_qp {
104         struct res_common       com;
105         struct res_mtt         *mtt;
106         struct res_cq          *rcq;
107         struct res_cq          *scq;
108         struct res_srq         *srq;
109         struct list_head        mcg_list;
110         spinlock_t              mcg_spl;
111         int                     local_qpn;
112 };
113
114 enum res_mtt_states {
115         RES_MTT_BUSY = RES_ANY_BUSY,
116         RES_MTT_ALLOCATED,
117 };
118
119 static inline const char *mtt_states_str(enum res_mtt_states state)
120 {
121         switch (state) {
122         case RES_MTT_BUSY: return "RES_MTT_BUSY";
123         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
124         default: return "Unknown";
125         }
126 }
127
128 struct res_mtt {
129         struct res_common       com;
130         int                     order;
131         atomic_t                ref_count;
132 };
133
134 enum res_mpt_states {
135         RES_MPT_BUSY = RES_ANY_BUSY,
136         RES_MPT_RESERVED,
137         RES_MPT_MAPPED,
138         RES_MPT_HW,
139 };
140
141 struct res_mpt {
142         struct res_common       com;
143         struct res_mtt         *mtt;
144         int                     key;
145 };
146
147 enum res_eq_states {
148         RES_EQ_BUSY = RES_ANY_BUSY,
149         RES_EQ_RESERVED,
150         RES_EQ_HW,
151 };
152
153 struct res_eq {
154         struct res_common       com;
155         struct res_mtt         *mtt;
156 };
157
158 enum res_cq_states {
159         RES_CQ_BUSY = RES_ANY_BUSY,
160         RES_CQ_ALLOCATED,
161         RES_CQ_HW,
162 };
163
164 struct res_cq {
165         struct res_common       com;
166         struct res_mtt         *mtt;
167         atomic_t                ref_count;
168 };
169
170 enum res_srq_states {
171         RES_SRQ_BUSY = RES_ANY_BUSY,
172         RES_SRQ_ALLOCATED,
173         RES_SRQ_HW,
174 };
175
176 struct res_srq {
177         struct res_common       com;
178         struct res_mtt         *mtt;
179         struct res_cq          *cq;
180         atomic_t                ref_count;
181 };
182
183 enum res_counter_states {
184         RES_COUNTER_BUSY = RES_ANY_BUSY,
185         RES_COUNTER_ALLOCATED,
186 };
187
188 struct res_counter {
189         struct res_common       com;
190         int                     port;
191 };
192
193 enum res_xrcdn_states {
194         RES_XRCD_BUSY = RES_ANY_BUSY,
195         RES_XRCD_ALLOCATED,
196 };
197
198 struct res_xrcdn {
199         struct res_common       com;
200         int                     port;
201 };
202
203 enum res_fs_rule_states {
204         RES_FS_RULE_BUSY = RES_ANY_BUSY,
205         RES_FS_RULE_ALLOCATED,
206 };
207
208 struct res_fs_rule {
209         struct res_common       com;
210 };
211
212 static int mlx4_is_eth(struct mlx4_dev *dev, int port)
213 {
214         return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
215 }
216
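/* Find a tracked resource by id in one of the per-type red-black trees. */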
217 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
218 {
219         struct rb_node *node = root->rb_node;
220
221         while (node) {
222                 struct res_common *res = container_of(node, struct res_common,
223                                                       node);
224
225                 if (res_id < res->res_id)
226                         node = node->rb_left;
227                 else if (res_id > res->res_id)
228                         node = node->rb_right;
229                 else
230                         return res;
231         }
232         return NULL;
233 }
234
235 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
236 {
237         struct rb_node **new = &(root->rb_node), *parent = NULL;
238
239         /* Figure out where to put new node */
240         while (*new) {
241                 struct res_common *this = container_of(*new, struct res_common,
242                                                        node);
243
244                 parent = *new;
245                 if (res->res_id < this->res_id)
246                         new = &((*new)->rb_left);
247                 else if (res->res_id > this->res_id)
248                         new = &((*new)->rb_right);
249                 else
250                         return -EEXIST;
251         }
252
253         /* Add new node and rebalance tree. */
254         rb_link_node(&res->node, parent, new);
255         rb_insert_color(&res->node, root);
256
257         return 0;
258 }
259
260 enum qp_transition {
261         QP_TRANS_INIT2RTR,
262         QP_TRANS_RTR2RTS,
263         QP_TRANS_RTS2RTS,
264         QP_TRANS_SQERR2RTS,
265         QP_TRANS_SQD2SQD,
266         QP_TRANS_SQD2RTS
267 };
268
269 /* For debug use only */
270 static const char *ResourceType(enum mlx4_resource rt)
271 {
272         switch (rt) {
273         case RES_QP: return "RES_QP";
274         case RES_CQ: return "RES_CQ";
275         case RES_SRQ: return "RES_SRQ";
276         case RES_MPT: return "RES_MPT";
277         case RES_MTT: return "RES_MTT";
278         case RES_MAC: return  "RES_MAC";
279         case RES_VLAN: return  "RES_VLAN";
280         case RES_EQ: return "RES_EQ";
281         case RES_COUNTER: return "RES_COUNTER";
282         case RES_FS_RULE: return "RES_FS_RULE";
283         case RES_XRCD: return "RES_XRCD";
284         default: return "Unknown resource type !!!";
285         }
286 }
287
288 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
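/*
 * Charge 'count' instances of a resource type to a slave.  The request is
 * rejected if it would exceed the slave's quota; otherwise it is granted
 * only if it fits within the slave's guaranteed share, or if it can be
 * taken from the shared free pool without eating into the amounts
 * reserved (guaranteed) for the other functions.  MACs and VLANs are
 * accounted per port, all other types globally.
 */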
289 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
290                                       enum mlx4_resource res_type, int count,
291                                       int port)
292 {
293         struct mlx4_priv *priv = mlx4_priv(dev);
294         struct resource_allocator *res_alloc =
295                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
296         int err = -EINVAL;
297         int allocated, free, reserved, guaranteed, from_free;
298
299         spin_lock(&res_alloc->alloc_lock);
300         allocated = (port > 0) ?
301                 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
302                 res_alloc->allocated[slave];
303         free = (port > 0) ? res_alloc->res_port_free[port - 1] :
304                 res_alloc->res_free;
305         reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
306                 res_alloc->res_reserved;
307         guaranteed = res_alloc->guaranteed[slave];
308
309         if (allocated + count > res_alloc->quota[slave])
310                 goto out;
311
312         if (allocated + count <= guaranteed) {
313                 err = 0;
314         } else {
315                 /* portion may need to be obtained from free area */
316                 if (guaranteed - allocated > 0)
317                         from_free = count - (guaranteed - allocated);
318                 else
319                         from_free = count;
320
321                 if (free - from_free > reserved)
322                         err = 0;
323         }
324
325         if (!err) {
326                 /* grant the request */
327                 if (port > 0) {
328                         res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
329                         res_alloc->res_port_free[port - 1] -= count;
330                 } else {
331                         res_alloc->allocated[slave] += count;
332                         res_alloc->res_free -= count;
333                 }
334         }
335
336 out:
337         spin_unlock(&res_alloc->alloc_lock);
338         return err;
339
340 }
341
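/* Return previously granted instances to the slave's counters and the free pool. */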
342 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
343                                     enum mlx4_resource res_type, int count,
344                                     int port)
345 {
346         struct mlx4_priv *priv = mlx4_priv(dev);
347         struct resource_allocator *res_alloc =
348                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
349
350         spin_lock(&res_alloc->alloc_lock);
351         if (port > 0) {
352                 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
353                 res_alloc->res_port_free[port - 1] += count;
354         } else {
355                 res_alloc->allocated[slave] -= count;
356                 res_alloc->res_free += count;
357         }
358
359         spin_unlock(&res_alloc->alloc_lock);
360         return;
361 }
362
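/*
 * Default quota policy: every function (PF and VFs) is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances and may allocate up to
 * half of the instances plus its guarantee.  The PF additionally folds
 * the reserved MTTs into its own free count, guarantee and quota.
 */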
363 static inline void initialize_res_quotas(struct mlx4_dev *dev,
364                                          struct resource_allocator *res_alloc,
365                                          enum mlx4_resource res_type,
366                                          int vf, int num_instances)
367 {
368         res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
369         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
370         if (vf == mlx4_master_func_num(dev)) {
371                 res_alloc->res_free = num_instances;
372                 if (res_type == RES_MTT) {
373                         /* reserved mtts will be taken out of the PF allocation */
374                         res_alloc->res_free += dev->caps.reserved_mtts;
375                         res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
376                         res_alloc->quota[vf] += dev->caps.reserved_mtts;
377                 }
378         }
379 }
380
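/*
 * Publish the effective quotas in dev->quotas: capabilities minus reserved
 * resources on a non-multifunction device, or the PF's resource-tracker
 * entries on the master.
 */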
381 void mlx4_init_quotas(struct mlx4_dev *dev)
382 {
383         struct mlx4_priv *priv = mlx4_priv(dev);
384         int pf;
385
386         /* quotas for VFs are initialized in mlx4_slave_cap */
387         if (mlx4_is_slave(dev))
388                 return;
389
390         if (!mlx4_is_mfunc(dev)) {
391                 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
392                         mlx4_num_reserved_sqps(dev);
393                 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
394                 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
395                 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
396                 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
397                 return;
398         }
399
400         pf = mlx4_master_func_num(dev);
401         dev->quotas.qp =
402                 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
403         dev->quotas.cq =
404                 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
405         dev->quotas.srq =
406                 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
407         dev->quotas.mtt =
408                 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
409         dev->quotas.mpt =
410                 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
411 }
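/*
 * Set up the master's resource tracker: per-slave resource lists with
 * their mutexes, an empty red-black tree per resource type, and a
 * quota/allocation accounting structure (res_alloc) per resource type.
 */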
412 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
413 {
414         struct mlx4_priv *priv = mlx4_priv(dev);
415         int i, j;
416         int t;
417
418         priv->mfunc.master.res_tracker.slave_list =
419                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
420                         GFP_KERNEL);
421         if (!priv->mfunc.master.res_tracker.slave_list)
422                 return -ENOMEM;
423
424         for (i = 0 ; i < dev->num_slaves; i++) {
425                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
426                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
427                                        slave_list[i].res_list[t]);
428                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
429         }
430
431         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
432                  dev->num_slaves);
433         for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
434                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
435
436         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
437                 struct resource_allocator *res_alloc =
438                         &priv->mfunc.master.res_tracker.res_alloc[i];
439                 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
440                 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
441                 if (i == RES_MAC || i == RES_VLAN)
442                         res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
443                                                        (dev->num_vfs + 1) * sizeof(int),
444                                                         GFP_KERNEL);
445                 else
446                         res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
447
448                 if (!res_alloc->quota || !res_alloc->guaranteed ||
449                     !res_alloc->allocated)
450                         goto no_mem_err;
451
452                 spin_lock_init(&res_alloc->alloc_lock);
453                 for (t = 0; t < dev->num_vfs + 1; t++) {
454                         switch (i) {
455                         case RES_QP:
456                                 initialize_res_quotas(dev, res_alloc, RES_QP,
457                                                       t, dev->caps.num_qps -
458                                                       dev->caps.reserved_qps -
459                                                       mlx4_num_reserved_sqps(dev));
460                                 break;
461                         case RES_CQ:
462                                 initialize_res_quotas(dev, res_alloc, RES_CQ,
463                                                       t, dev->caps.num_cqs -
464                                                       dev->caps.reserved_cqs);
465                                 break;
466                         case RES_SRQ:
467                                 initialize_res_quotas(dev, res_alloc, RES_SRQ,
468                                                       t, dev->caps.num_srqs -
469                                                       dev->caps.reserved_srqs);
470                                 break;
471                         case RES_MPT:
472                                 initialize_res_quotas(dev, res_alloc, RES_MPT,
473                                                       t, dev->caps.num_mpts -
474                                                       dev->caps.reserved_mrws);
475                                 break;
476                         case RES_MTT:
477                                 initialize_res_quotas(dev, res_alloc, RES_MTT,
478                                                       t, dev->caps.num_mtts -
479                                                       dev->caps.reserved_mtts);
480                                 break;
481                         case RES_MAC:
482                                 if (t == mlx4_master_func_num(dev)) {
483                                         res_alloc->quota[t] =
484                                                 MLX4_MAX_MAC_NUM - 2 * dev->num_vfs;
485                                         res_alloc->guaranteed[t] = res_alloc->quota[t];
486                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
487                                                 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
488                                 } else {
489                                         res_alloc->quota[t] = 2;
490                                         res_alloc->guaranteed[t] = 2;
491                                 }
492                                 break;
493                         case RES_VLAN:
494                                 if (t == mlx4_master_func_num(dev)) {
495                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
496                                         res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
497                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
498                                                 res_alloc->res_port_free[j] =
499                                                         res_alloc->quota[t];
500                                 } else {
501                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
502                                         res_alloc->guaranteed[t] = 0;
503                                 }
504                                 break;
505                         case RES_COUNTER:
506                                 res_alloc->quota[t] = dev->caps.max_counters;
507                                 res_alloc->guaranteed[t] = 0;
508                                 if (t == mlx4_master_func_num(dev))
509                                         res_alloc->res_free = res_alloc->quota[t];
510                                 break;
511                         default:
512                                 break;
513                         }
514                         if (i == RES_MAC || i == RES_VLAN) {
515                                 for (j = 0; j < MLX4_MAX_PORTS; j++)
516                                         res_alloc->res_port_rsvd[j] +=
517                                                 res_alloc->guaranteed[t];
518                         } else {
519                                 res_alloc->res_reserved += res_alloc->guaranteed[t];
520                         }
521                 }
522         }
523         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
524         return 0;
525
526 no_mem_err:
527         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
528                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
529                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
530                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
531                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
532                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
533                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
534         }
535         return -ENOMEM;
536 }
537
538 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
539                                 enum mlx4_res_tracker_free_type type)
540 {
541         struct mlx4_priv *priv = mlx4_priv(dev);
542         int i;
543
544         if (priv->mfunc.master.res_tracker.slave_list) {
545                 if (type != RES_TR_FREE_STRUCTS_ONLY) {
546                         for (i = 0; i < dev->num_slaves; i++) {
547                                 if (type == RES_TR_FREE_ALL ||
548                                     dev->caps.function != i)
549                                         mlx4_delete_all_resources_for_slave(dev, i);
550                         }
551                         /* free master's vlans */
552                         i = dev->caps.function;
553                         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
554                         rem_slave_vlans(dev, i);
555                         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
556                 }
557
558                 if (type != RES_TR_FREE_SLAVES_ONLY) {
559                         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
560                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
561                                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
562                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
563                                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
564                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
565                                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
566                         }
567                         kfree(priv->mfunc.master.res_tracker.slave_list);
568                         priv->mfunc.master.res_tracker.slave_list = NULL;
569                 }
570         }
571 }
572
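/*
 * Replace the slave's virtual pkey index in the QP context mailbox with
 * the physical pkey index for the port selected by the sched_queue field.
 */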
573 static void update_pkey_index(struct mlx4_dev *dev, int slave,
574                               struct mlx4_cmd_mailbox *inbox)
575 {
576         u8 sched = *(u8 *)(inbox->buf + 64);
577         u8 orig_index = *(u8 *)(inbox->buf + 35);
578         u8 new_index;
579         struct mlx4_priv *priv = mlx4_priv(dev);
580         int port;
581
582         port = (sched >> 6 & 1) + 1;
583
584         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
585         *(u8 *)(inbox->buf + 35) = new_index;
586 }
587
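/*
 * Rewrite the mgid_index fields in a modify-QP mailbox so a slave can only
 * reference GID table entries assigned to it: its own slave index on IB
 * ports, or an index rebased by the slave's base GID index on Ethernet
 * ports.
 */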
588 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
589                        u8 slave)
590 {
591         struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
592         enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
593         u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
594         int port;
595
596         if (MLX4_QP_ST_UD == ts) {
597                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
598                 if (mlx4_is_eth(dev, port))
599                         qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
600                 else
601                         qp_ctx->pri_path.mgid_index = 0x80 | slave;
602
603         } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
604                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
605                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
606                         if (mlx4_is_eth(dev, port)) {
607                                 qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
608                                 qp_ctx->pri_path.mgid_index &= 0x7f;
609                         } else {
610                                 qp_ctx->pri_path.mgid_index = slave & 0x7F;
611                         }
612                 }
613                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
614                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
615                         if (mlx4_is_eth(dev, port)) {
616                                 qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
617                                 qp_ctx->alt_path.mgid_index &= 0x7f;
618                         } else {
619                                 qp_ctx->alt_path.mgid_index = slave & 0x7F;
620                         }
621                 }
622         }
623 }
624
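/*
 * Apply the administratively configured vport state for this VF and port
 * (default VLAN/QoS insertion and MAC spoof checking) to the QP context
 * the slave is installing.  Rejected for RC QPs when a default VLAN is
 * set.
 */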
625 static int update_vport_qp_param(struct mlx4_dev *dev,
626                                  struct mlx4_cmd_mailbox *inbox,
627                                  u8 slave)
628 {
629         struct mlx4_qp_context  *qpc = inbox->buf + 8;
630         struct mlx4_vport_oper_state *vp_oper;
631         struct mlx4_priv *priv;
632         u32 qp_type;
633         int port;
634
635         port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
636         priv = mlx4_priv(dev);
637         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
638
639         if (MLX4_VGT != vp_oper->state.default_vlan) {
640                 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
641                 if (MLX4_QP_ST_RC == qp_type)
642                         return -EINVAL;
643
644                 qpc->srqn |= cpu_to_be32(1 << 25); /* set cqe vlan mask */
645                 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
646                 qpc->pri_path.fl = 1 << 6; /* set cv bit*/
647                 qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
648                 qpc->pri_path.sched_queue &= 0xC7;
649                 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
650                 mlx4_dbg(dev, "qp %d  port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
651                          be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
652                          (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
653                          vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
654                          (int)(qpc->pri_path.fl));
655         }
656         if (vp_oper->state.spoofchk) {
657                 qpc->pri_path.feup |= 1 << 5; /* set fsm bit */
658                 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
659                 mlx4_dbg(dev, "spoof qp %d  port %d feup  0x%x, myLmc 0x%x mindx %d\n",
660                          be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
661                          (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
662                          vp_oper->mac_idx);
663         }
664         return 0;
665 }
666
667 static int mpt_mask(struct mlx4_dev *dev)
668 {
669         return dev->caps.num_mpts - 1;
670 }
671
672 static void *find_res(struct mlx4_dev *dev, int res_id,
673                       enum mlx4_resource type)
674 {
675         struct mlx4_priv *priv = mlx4_priv(dev);
676
677         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
678                                   res_id);
679 }
680
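/*
 * Take exclusive ("busy") ownership of a tracked resource on behalf of a
 * slave.  Fails if the resource does not exist, is already busy, or is
 * owned by another slave; put_res() drops the busy state again.
 */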
681 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
682                    enum mlx4_resource type,
683                    void *res)
684 {
685         struct res_common *r;
686         int err = 0;
687
688         spin_lock_irq(mlx4_tlock(dev));
689         r = find_res(dev, res_id, type);
690         if (!r) {
691                 err = -ENOENT;
692                 goto exit;
693         }
694
695         if (r->state == RES_ANY_BUSY) {
696                 err = -EBUSY;
697                 goto exit;
698         }
699
700         if (r->owner != slave) {
701                 err = -EPERM;
702                 goto exit;
703         }
704
705         r->from_state = r->state;
706         r->state = RES_ANY_BUSY;
707
708         if (res)
709                 *((struct res_common **)res) = r;
710
711 exit:
712         spin_unlock_irq(mlx4_tlock(dev));
713         return err;
714 }
715
716 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
717                                     enum mlx4_resource type,
718                                     u64 res_id, int *slave)
719 {
720
721         struct res_common *r;
722         int err = -ENOENT;
723         int id = res_id;
724
725         if (type == RES_QP)
726                 id &= 0x7fffff;
727         spin_lock(mlx4_tlock(dev));
728
729         r = find_res(dev, id, type);
730         if (r) {
731                 *slave = r->owner;
732                 err = 0;
733         }
734         spin_unlock(mlx4_tlock(dev));
735
736         return err;
737 }
738
739 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
740                     enum mlx4_resource type)
741 {
742         struct res_common *r;
743
744         spin_lock_irq(mlx4_tlock(dev));
745         r = find_res(dev, res_id, type);
746         if (r)
747                 r->state = r->from_state;
748         spin_unlock_irq(mlx4_tlock(dev));
749 }
750
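/*
 * Constructors for the per-type tracker entries.  Each returns a pointer
 * to the embedded res_common so that alloc_tr() can handle all types
 * uniformly.
 */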
751 static struct res_common *alloc_qp_tr(int id)
752 {
753         struct res_qp *ret;
754
755         ret = kzalloc(sizeof *ret, GFP_KERNEL);
756         if (!ret)
757                 return NULL;
758
759         ret->com.res_id = id;
760         ret->com.state = RES_QP_RESERVED;
761         ret->local_qpn = id;
762         INIT_LIST_HEAD(&ret->mcg_list);
763         spin_lock_init(&ret->mcg_spl);
764
765         return &ret->com;
766 }
767
768 static struct res_common *alloc_mtt_tr(int id, int order)
769 {
770         struct res_mtt *ret;
771
772         ret = kzalloc(sizeof *ret, GFP_KERNEL);
773         if (!ret)
774                 return NULL;
775
776         ret->com.res_id = id;
777         ret->order = order;
778         ret->com.state = RES_MTT_ALLOCATED;
779         atomic_set(&ret->ref_count, 0);
780
781         return &ret->com;
782 }
783
784 static struct res_common *alloc_mpt_tr(int id, int key)
785 {
786         struct res_mpt *ret;
787
788         ret = kzalloc(sizeof *ret, GFP_KERNEL);
789         if (!ret)
790                 return NULL;
791
792         ret->com.res_id = id;
793         ret->com.state = RES_MPT_RESERVED;
794         ret->key = key;
795
796         return &ret->com;
797 }
798
799 static struct res_common *alloc_eq_tr(int id)
800 {
801         struct res_eq *ret;
802
803         ret = kzalloc(sizeof *ret, GFP_KERNEL);
804         if (!ret)
805                 return NULL;
806
807         ret->com.res_id = id;
808         ret->com.state = RES_EQ_RESERVED;
809
810         return &ret->com;
811 }
812
813 static struct res_common *alloc_cq_tr(int id)
814 {
815         struct res_cq *ret;
816
817         ret = kzalloc(sizeof *ret, GFP_KERNEL);
818         if (!ret)
819                 return NULL;
820
821         ret->com.res_id = id;
822         ret->com.state = RES_CQ_ALLOCATED;
823         atomic_set(&ret->ref_count, 0);
824
825         return &ret->com;
826 }
827
828 static struct res_common *alloc_srq_tr(int id)
829 {
830         struct res_srq *ret;
831
832         ret = kzalloc(sizeof *ret, GFP_KERNEL);
833         if (!ret)
834                 return NULL;
835
836         ret->com.res_id = id;
837         ret->com.state = RES_SRQ_ALLOCATED;
838         atomic_set(&ret->ref_count, 0);
839
840         return &ret->com;
841 }
842
843 static struct res_common *alloc_counter_tr(int id)
844 {
845         struct res_counter *ret;
846
847         ret = kzalloc(sizeof *ret, GFP_KERNEL);
848         if (!ret)
849                 return NULL;
850
851         ret->com.res_id = id;
852         ret->com.state = RES_COUNTER_ALLOCATED;
853
854         return &ret->com;
855 }
856
857 static struct res_common *alloc_xrcdn_tr(int id)
858 {
859         struct res_xrcdn *ret;
860
861         ret = kzalloc(sizeof *ret, GFP_KERNEL);
862         if (!ret)
863                 return NULL;
864
865         ret->com.res_id = id;
866         ret->com.state = RES_XRCD_ALLOCATED;
867
868         return &ret->com;
869 }
870
871 static struct res_common *alloc_fs_rule_tr(u64 id)
872 {
873         struct res_fs_rule *ret;
874
875         ret = kzalloc(sizeof *ret, GFP_KERNEL);
876         if (!ret)
877                 return NULL;
878
879         ret->com.res_id = id;
880         ret->com.state = RES_FS_RULE_ALLOCATED;
881
882         return &ret->com;
883 }
884
885 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
886                                    int extra)
887 {
888         struct res_common *ret;
889
890         switch (type) {
891         case RES_QP:
892                 ret = alloc_qp_tr(id);
893                 break;
894         case RES_MPT:
895                 ret = alloc_mpt_tr(id, extra);
896                 break;
897         case RES_MTT:
898                 ret = alloc_mtt_tr(id, extra);
899                 break;
900         case RES_EQ:
901                 ret = alloc_eq_tr(id);
902                 break;
903         case RES_CQ:
904                 ret = alloc_cq_tr(id);
905                 break;
906         case RES_SRQ:
907                 ret = alloc_srq_tr(id);
908                 break;
909         case RES_MAC:
910                 printk(KERN_ERR "implementation missing\n");
911                 return NULL;
912         case RES_COUNTER:
913                 ret = alloc_counter_tr(id);
914                 break;
915         case RES_XRCD:
916                 ret = alloc_xrcdn_tr(id);
917                 break;
918         case RES_FS_RULE:
919                 ret = alloc_fs_rule_tr(id);
920                 break;
921         default:
922                 return NULL;
923         }
924         if (ret)
925                 ret->owner = slave;
926
927         return ret;
928 }
929
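/*
 * Create tracker entries for the id range [base, base + count) on behalf
 * of a slave: allocate the entries, then insert each into the type's
 * red-black tree and onto the slave's resource list under the tracker
 * lock, undoing the insertions if any id is already tracked.
 */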
930 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
931                          enum mlx4_resource type, int extra)
932 {
933         int i;
934         int err;
935         struct mlx4_priv *priv = mlx4_priv(dev);
936         struct res_common **res_arr;
937         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
938         struct rb_root *root = &tracker->res_tree[type];
939
940         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
941         if (!res_arr)
942                 return -ENOMEM;
943
944         for (i = 0; i < count; ++i) {
945                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
946                 if (!res_arr[i]) {
947                         for (--i; i >= 0; --i)
948                                 kfree(res_arr[i]);
949
950                         kfree(res_arr);
951                         return -ENOMEM;
952                 }
953         }
954
955         spin_lock_irq(mlx4_tlock(dev));
956         for (i = 0; i < count; ++i) {
957                 if (find_res(dev, base + i, type)) {
958                         err = -EEXIST;
959                         goto undo;
960                 }
961                 err = res_tracker_insert(root, res_arr[i]);
962                 if (err)
963                         goto undo;
964                 list_add_tail(&res_arr[i]->list,
965                               &tracker->slave_list[slave].res_list[type]);
966         }
967         spin_unlock_irq(mlx4_tlock(dev));
968         kfree(res_arr);
969
970         return 0;
971
972 undo:
973         for (--i; i >= 0; --i)  /* i indexes res_arr, not resource ids */
974                 rb_erase(&res_arr[i]->node, root);
975
976         spin_unlock_irq(mlx4_tlock(dev));
977
978         for (i = 0; i < count; ++i)
979                 kfree(res_arr[i]);
980
981         kfree(res_arr);
982
983         return err;
984 }
985
986 static int remove_qp_ok(struct res_qp *res)
987 {
988         if (res->com.state == RES_QP_BUSY)
989                 return -EBUSY;
990         else if (res->com.state != RES_QP_RESERVED)
991                 return -EPERM;
992
993         return 0;
994 }
995
996 static int remove_mtt_ok(struct res_mtt *res, int order)
997 {
998         if (res->com.state == RES_MTT_BUSY ||
999             atomic_read(&res->ref_count)) {
1000                 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
1001                        __func__, __LINE__,
1002                        mtt_states_str(res->com.state),
1003                        atomic_read(&res->ref_count));
1004                 return -EBUSY;
1005         } else if (res->com.state != RES_MTT_ALLOCATED)
1006                 return -EPERM;
1007         else if (res->order != order)
1008                 return -EINVAL;
1009
1010         return 0;
1011 }
1012
1013 static int remove_mpt_ok(struct res_mpt *res)
1014 {
1015         if (res->com.state == RES_MPT_BUSY)
1016                 return -EBUSY;
1017         else if (res->com.state != RES_MPT_RESERVED)
1018                 return -EPERM;
1019
1020         return 0;
1021 }
1022
1023 static int remove_eq_ok(struct res_eq *res)
1024 {
1025         if (res->com.state == RES_EQ_BUSY)
1026                 return -EBUSY;
1027         else if (res->com.state != RES_EQ_RESERVED)
1028                 return -EPERM;
1029
1030         return 0;
1031 }
1032
1033 static int remove_counter_ok(struct res_counter *res)
1034 {
1035         if (res->com.state == RES_COUNTER_BUSY)
1036                 return -EBUSY;
1037         else if (res->com.state != RES_COUNTER_ALLOCATED)
1038                 return -EPERM;
1039
1040         return 0;
1041 }
1042
1043 static int remove_xrcdn_ok(struct res_xrcdn *res)
1044 {
1045         if (res->com.state == RES_XRCD_BUSY)
1046                 return -EBUSY;
1047         else if (res->com.state != RES_XRCD_ALLOCATED)
1048                 return -EPERM;
1049
1050         return 0;
1051 }
1052
1053 static int remove_fs_rule_ok(struct res_fs_rule *res)
1054 {
1055         if (res->com.state == RES_FS_RULE_BUSY)
1056                 return -EBUSY;
1057         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1058                 return -EPERM;
1059
1060         return 0;
1061 }
1062
1063 static int remove_cq_ok(struct res_cq *res)
1064 {
1065         if (res->com.state == RES_CQ_BUSY)
1066                 return -EBUSY;
1067         else if (res->com.state != RES_CQ_ALLOCATED)
1068                 return -EPERM;
1069
1070         return 0;
1071 }
1072
1073 static int remove_srq_ok(struct res_srq *res)
1074 {
1075         if (res->com.state == RES_SRQ_BUSY)
1076                 return -EBUSY;
1077         else if (res->com.state != RES_SRQ_ALLOCATED)
1078                 return -EPERM;
1079
1080         return 0;
1081 }
1082
1083 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1084 {
1085         switch (type) {
1086         case RES_QP:
1087                 return remove_qp_ok((struct res_qp *)res);
1088         case RES_CQ:
1089                 return remove_cq_ok((struct res_cq *)res);
1090         case RES_SRQ:
1091                 return remove_srq_ok((struct res_srq *)res);
1092         case RES_MPT:
1093                 return remove_mpt_ok((struct res_mpt *)res);
1094         case RES_MTT:
1095                 return remove_mtt_ok((struct res_mtt *)res, extra);
1096         case RES_MAC:
1097                 return -ENOSYS;
1098         case RES_EQ:
1099                 return remove_eq_ok((struct res_eq *)res);
1100         case RES_COUNTER:
1101                 return remove_counter_ok((struct res_counter *)res);
1102         case RES_XRCD:
1103                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1104         case RES_FS_RULE:
1105                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1106         default:
1107                 return -EINVAL;
1108         }
1109 }
1110
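/*
 * Remove the tracker entries for [base, base + count).  Every entry must
 * exist, be owned by the slave and be in a state that allows removal,
 * otherwise nothing is removed.
 */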
1111 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1112                          enum mlx4_resource type, int extra)
1113 {
1114         u64 i;
1115         int err;
1116         struct mlx4_priv *priv = mlx4_priv(dev);
1117         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1118         struct res_common *r;
1119
1120         spin_lock_irq(mlx4_tlock(dev));
1121         for (i = base; i < base + count; ++i) {
1122                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1123                 if (!r) {
1124                         err = -ENOENT;
1125                         goto out;
1126                 }
1127                 if (r->owner != slave) {
1128                         err = -EPERM;
1129                         goto out;
1130                 }
1131                 err = remove_ok(r, type, extra);
1132                 if (err)
1133                         goto out;
1134         }
1135
1136         for (i = base; i < base + count; ++i) {
1137                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1138                 rb_erase(&r->node, &tracker->res_tree[type]);
1139                 list_del(&r->list);
1140                 kfree(r);
1141         }
1142         err = 0;
1143
1144 out:
1145         spin_unlock_irq(mlx4_tlock(dev));
1146
1147         return err;
1148 }
1149
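/*
 * Start moving a QP tracker entry to a new state: validate that the
 * transition is legal from the current state, then mark the entry busy
 * and record the from/to states.  The caller commits the move with
 * res_end_move() or rolls it back with res_abort_move().  The helpers
 * below do the same for MPTs, EQs, CQs and SRQs.
 */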
1150 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1151                                 enum res_qp_states state, struct res_qp **qp,
1152                                 int alloc)
1153 {
1154         struct mlx4_priv *priv = mlx4_priv(dev);
1155         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1156         struct res_qp *r;
1157         int err = 0;
1158
1159         spin_lock_irq(mlx4_tlock(dev));
1160         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1161         if (!r)
1162                 err = -ENOENT;
1163         else if (r->com.owner != slave)
1164                 err = -EPERM;
1165         else {
1166                 switch (state) {
1167                 case RES_QP_BUSY:
1168                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1169                                  __func__, r->com.res_id);
1170                         err = -EBUSY;
1171                         break;
1172
1173                 case RES_QP_RESERVED:
1174                         if (r->com.state == RES_QP_MAPPED && !alloc)
1175                                 break;
1176
1177                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1178                         err = -EINVAL;
1179                         break;
1180
1181                 case RES_QP_MAPPED:
1182                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1183                             r->com.state == RES_QP_HW)
1184                                 break;
1185                         else {
1186                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1187                                           r->com.res_id);
1188                                 err = -EINVAL;
1189                         }
1190
1191                         break;
1192
1193                 case RES_QP_HW:
1194                         if (r->com.state != RES_QP_MAPPED)
1195                                 err = -EINVAL;
1196                         break;
1197                 default:
1198                         err = -EINVAL;
1199                 }
1200
1201                 if (!err) {
1202                         r->com.from_state = r->com.state;
1203                         r->com.to_state = state;
1204                         r->com.state = RES_QP_BUSY;
1205                         if (qp)
1206                                 *qp = r;
1207                 }
1208         }
1209
1210         spin_unlock_irq(mlx4_tlock(dev));
1211
1212         return err;
1213 }
1214
1215 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1216                                 enum res_mpt_states state, struct res_mpt **mpt)
1217 {
1218         struct mlx4_priv *priv = mlx4_priv(dev);
1219         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1220         struct res_mpt *r;
1221         int err = 0;
1222
1223         spin_lock_irq(mlx4_tlock(dev));
1224         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1225         if (!r)
1226                 err = -ENOENT;
1227         else if (r->com.owner != slave)
1228                 err = -EPERM;
1229         else {
1230                 switch (state) {
1231                 case RES_MPT_BUSY:
1232                         err = -EINVAL;
1233                         break;
1234
1235                 case RES_MPT_RESERVED:
1236                         if (r->com.state != RES_MPT_MAPPED)
1237                                 err = -EINVAL;
1238                         break;
1239
1240                 case RES_MPT_MAPPED:
1241                         if (r->com.state != RES_MPT_RESERVED &&
1242                             r->com.state != RES_MPT_HW)
1243                                 err = -EINVAL;
1244                         break;
1245
1246                 case RES_MPT_HW:
1247                         if (r->com.state != RES_MPT_MAPPED)
1248                                 err = -EINVAL;
1249                         break;
1250                 default:
1251                         err = -EINVAL;
1252                 }
1253
1254                 if (!err) {
1255                         r->com.from_state = r->com.state;
1256                         r->com.to_state = state;
1257                         r->com.state = RES_MPT_BUSY;
1258                         if (mpt)
1259                                 *mpt = r;
1260                 }
1261         }
1262
1263         spin_unlock_irq(mlx4_tlock(dev));
1264
1265         return err;
1266 }
1267
1268 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1269                                 enum res_eq_states state, struct res_eq **eq)
1270 {
1271         struct mlx4_priv *priv = mlx4_priv(dev);
1272         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1273         struct res_eq *r;
1274         int err = 0;
1275
1276         spin_lock_irq(mlx4_tlock(dev));
1277         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1278         if (!r)
1279                 err = -ENOENT;
1280         else if (r->com.owner != slave)
1281                 err = -EPERM;
1282         else {
1283                 switch (state) {
1284                 case RES_EQ_BUSY:
1285                         err = -EINVAL;
1286                         break;
1287
1288                 case RES_EQ_RESERVED:
1289                         if (r->com.state != RES_EQ_HW)
1290                                 err = -EINVAL;
1291                         break;
1292
1293                 case RES_EQ_HW:
1294                         if (r->com.state != RES_EQ_RESERVED)
1295                                 err = -EINVAL;
1296                         break;
1297
1298                 default:
1299                         err = -EINVAL;
1300                 }
1301
1302                 if (!err) {
1303                         r->com.from_state = r->com.state;
1304                         r->com.to_state = state;
1305                         r->com.state = RES_EQ_BUSY;
1306                         if (eq)
1307                                 *eq = r;
1308                 }
1309         }
1310
1311         spin_unlock_irq(mlx4_tlock(dev));
1312
1313         return err;
1314 }
1315
1316 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1317                                 enum res_cq_states state, struct res_cq **cq)
1318 {
1319         struct mlx4_priv *priv = mlx4_priv(dev);
1320         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1321         struct res_cq *r;
1322         int err;
1323
1324         spin_lock_irq(mlx4_tlock(dev));
1325         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1326         if (!r)
1327                 err = -ENOENT;
1328         else if (r->com.owner != slave)
1329                 err = -EPERM;
1330         else {
1331                 switch (state) {
1332                 case RES_CQ_BUSY:
1333                         err = -EBUSY;
1334                         break;
1335
1336                 case RES_CQ_ALLOCATED:
1337                         if (r->com.state != RES_CQ_HW)
1338                                 err = -EINVAL;
1339                         else if (atomic_read(&r->ref_count))
1340                                 err = -EBUSY;
1341                         else
1342                                 err = 0;
1343                         break;
1344
1345                 case RES_CQ_HW:
1346                         if (r->com.state != RES_CQ_ALLOCATED)
1347                                 err = -EINVAL;
1348                         else
1349                                 err = 0;
1350                         break;
1351
1352                 default:
1353                         err = -EINVAL;
1354                 }
1355
1356                 if (!err) {
1357                         r->com.from_state = r->com.state;
1358                         r->com.to_state = state;
1359                         r->com.state = RES_CQ_BUSY;
1360                         if (cq)
1361                                 *cq = r;
1362                 }
1363         }
1364
1365         spin_unlock_irq(mlx4_tlock(dev));
1366
1367         return err;
1368 }
1369
1370 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1371                                  enum res_srq_states state, struct res_srq **srq)
1372 {
1373         struct mlx4_priv *priv = mlx4_priv(dev);
1374         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1375         struct res_srq *r;
1376         int err = 0;
1377
1378         spin_lock_irq(mlx4_tlock(dev));
1379         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1380         if (!r)
1381                 err = -ENOENT;
1382         else if (r->com.owner != slave)
1383                 err = -EPERM;
1384         else {
1385                 switch (state) {
1386                 case RES_SRQ_BUSY:
1387                         err = -EINVAL;
1388                         break;
1389
1390                 case RES_SRQ_ALLOCATED:
1391                         if (r->com.state != RES_SRQ_HW)
1392                                 err = -EINVAL;
1393                         else if (atomic_read(&r->ref_count))
1394                                 err = -EBUSY;
1395                         break;
1396
1397                 case RES_SRQ_HW:
1398                         if (r->com.state != RES_SRQ_ALLOCATED)
1399                                 err = -EINVAL;
1400                         break;
1401
1402                 default:
1403                         err = -EINVAL;
1404                 }
1405
1406                 if (!err) {
1407                         r->com.from_state = r->com.state;
1408                         r->com.to_state = state;
1409                         r->com.state = RES_SRQ_BUSY;
1410                         if (srq)
1411                                 *srq = r;
1412                 }
1413         }
1414
1415         spin_unlock_irq(mlx4_tlock(dev));
1416
1417         return err;
1418 }
1419
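/*
 * Roll back (abort) or commit (end) a state transition started by one of
 * the *_res_start_move_to() helpers above.
 */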
1420 static void res_abort_move(struct mlx4_dev *dev, int slave,
1421                            enum mlx4_resource type, int id)
1422 {
1423         struct mlx4_priv *priv = mlx4_priv(dev);
1424         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1425         struct res_common *r;
1426
1427         spin_lock_irq(mlx4_tlock(dev));
1428         r = res_tracker_lookup(&tracker->res_tree[type], id);
1429         if (r && (r->owner == slave))
1430                 r->state = r->from_state;
1431         spin_unlock_irq(mlx4_tlock(dev));
1432 }
1433
1434 static void res_end_move(struct mlx4_dev *dev, int slave,
1435                          enum mlx4_resource type, int id)
1436 {
1437         struct mlx4_priv *priv = mlx4_priv(dev);
1438         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1439         struct res_common *r;
1440
1441         spin_lock_irq(mlx4_tlock(dev));
1442         r = res_tracker_lookup(&tracker->res_tree[type], id);
1443         if (r && (r->owner == slave))
1444                 r->state = r->to_state;
1445         spin_unlock_irq(mlx4_tlock(dev));
1446 }
1447
1448 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1449 {
1450         return mlx4_is_qp_reserved(dev, qpn) &&
1451                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1452 }
1453
1454 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1455 {
1456         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1457 }
1458
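/*
 * Allocate QP resources on behalf of a slave.  RES_OP_RESERVE charges the
 * slave's quota and reserves a range of QP numbers; RES_OP_MAP_ICM moves a
 * single qpn to the MAPPED state and maps its ICM memory unless the qpn is
 * firmware-reserved.
 */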
1459 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1460                         u64 in_param, u64 *out_param)
1461 {
1462         int err;
1463         int count;
1464         int align;
1465         int base;
1466         int qpn;
1467         u8 bf_qp;
1468
1469         switch (op) {
1470         case RES_OP_RESERVE:
1471                 count = get_param_l(&in_param) & 0xffffff;
1472                 bf_qp = get_param_l(&in_param) >> 31;
1473                 align = get_param_h(&in_param);
1474                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1475                 if (err)
1476                         return err;
1477
1478                 err = __mlx4_qp_reserve_range(dev, count, align, &base, bf_qp);
1479                 if (err) {
1480                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1481                         return err;
1482                 }
1483
1484                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1485                 if (err) {
1486                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1487                         __mlx4_qp_release_range(dev, base, count);
1488                         return err;
1489                 }
1490                 set_param_l(out_param, base);
1491                 break;
1492         case RES_OP_MAP_ICM:
1493                 qpn = get_param_l(&in_param) & 0x7fffff;
1494                 if (valid_reserved(dev, slave, qpn)) {
1495                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1496                         if (err)
1497                                 return err;
1498                 }
1499
1500                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1501                                            NULL, 1);
1502                 if (err)
1503                         return err;
1504
1505                 if (!fw_reserved(dev, qpn)) {
1506                         err = __mlx4_qp_alloc_icm(dev, qpn);
1507                         if (err) {
1508                                 res_abort_move(dev, slave, RES_QP, qpn);
1509                                 return err;
1510                         }
1511                 }
1512
1513                 res_end_move(dev, slave, RES_QP, qpn);
1514                 break;
1515
1516         default:
1517                 err = -EINVAL;
1518                 break;
1519         }
1520         return err;
1521 }
1522
1523 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1524                          u64 in_param, u64 *out_param)
1525 {
1526         int err = -EINVAL;
1527         int base;
1528         int order;
1529
1530         if (op != RES_OP_RESERVE_AND_MAP)
1531                 return err;
1532
1533         order = get_param_l(&in_param);
1534
1535         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1536         if (err)
1537                 return err;
1538
1539         base = __mlx4_alloc_mtt_range(dev, order);
1540         if (base == -1) {
1541                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1542                 return -ENOMEM;
1543         }
1544
1545         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1546         if (err) {
1547                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1548                 __mlx4_free_mtt_range(dev, base, order);
1549         } else
1550                 set_param_l(out_param, base);
1551
1552         return err;
1553 }
1554
1555 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1556                          u64 in_param, u64 *out_param)
1557 {
1558         int err = -EINVAL;
1559         int index;
1560         int id;
1561         struct res_mpt *mpt;
1562
1563         switch (op) {
1564         case RES_OP_RESERVE:
1565                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1566                 if (err)
1567                         break;
1568
1569                 index = __mlx4_mr_reserve(dev);
1570                 if (index == -1) {
1571                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1572                         break;
1573                 }
1574                 id = index & mpt_mask(dev);
1575
1576                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1577                 if (err) {
1578                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1579                         __mlx4_mr_release(dev, index);
1580                         break;
1581                 }
1582                 set_param_l(out_param, index);
1583                 break;
1584         case RES_OP_MAP_ICM:
1585                 index = get_param_l(&in_param);
1586                 id = index & mpt_mask(dev);
1587                 err = mr_res_start_move_to(dev, slave, id,
1588                                            RES_MPT_MAPPED, &mpt);
1589                 if (err)
1590                         return err;
1591
1592                 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1593                 if (err) {
1594                         res_abort_move(dev, slave, RES_MPT, id);
1595                         return err;
1596                 }
1597
1598                 res_end_move(dev, slave, RES_MPT, id);
1599                 break;
1600         }
1601         return err;
1602 }
1603
1604 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1605                         u64 in_param, u64 *out_param)
1606 {
1607         int cqn;
1608         int err;
1609
1610         switch (op) {
1611         case RES_OP_RESERVE_AND_MAP:
1612                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1613                 if (err)
1614                         break;
1615
1616                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1617                 if (err) {
1618                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1619                         break;
1620                 }
1621
1622                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1623                 if (err) {
1624                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1625                         __mlx4_cq_free_icm(dev, cqn);
1626                         break;
1627                 }
1628
1629                 set_param_l(out_param, cqn);
1630                 break;
1631
1632         default:
1633                 err = -EINVAL;
1634         }
1635
1636         return err;
1637 }
1638
1639 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1640                          u64 in_param, u64 *out_param)
1641 {
1642         int srqn;
1643         int err;
1644
1645         switch (op) {
1646         case RES_OP_RESERVE_AND_MAP:
1647                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1648                 if (err)
1649                         break;
1650
1651                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1652                 if (err) {
1653                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1654                         break;
1655                 }
1656
1657                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1658                 if (err) {
1659                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1660                         __mlx4_srq_free_icm(dev, srqn);
1661                         break;
1662                 }
1663
1664                 set_param_l(out_param, srqn);
1665                 break;
1666
1667         default:
1668                 err = -EINVAL;
1669         }
1670
1671         return err;
1672 }
1673
1674 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1675                                      u8 smac_index, u64 *mac)
1676 {
1677         struct mlx4_priv *priv = mlx4_priv(dev);
1678         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1679         struct list_head *mac_list =
1680                 &tracker->slave_list[slave].res_list[RES_MAC];
1681         struct mac_res *res, *tmp;
1682
1683         list_for_each_entry_safe(res, tmp, mac_list, list) {
1684                 if (res->smac_index == smac_index && res->port == (u8) port) {
1685                         *mac = res->mac;
1686                         return 0;
1687                 }
1688         }
1689         return -ENOENT;
1690 }
1691
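     /*
      * Track a MAC registration for a slave: if the (mac, port) pair is
      * already listed only its reference count is bumped, otherwise one
      * MAC is charged to the slave's quota and a new entry is added.
      */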
1692 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1693 {
1694         struct mlx4_priv *priv = mlx4_priv(dev);
1695         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1696         struct list_head *mac_list =
1697                 &tracker->slave_list[slave].res_list[RES_MAC];
1698         struct mac_res *res, *tmp;
1699
1700         list_for_each_entry_safe(res, tmp, mac_list, list) {
1701                 if (res->mac == mac && res->port == (u8) port) {
1702                         /* mac found. update ref count */
1703                         ++res->ref_count;
1704                         return 0;
1705                 }
1706         }
1707
1708         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1709                 return -EINVAL;
1710         res = kzalloc(sizeof(*res), GFP_KERNEL);
1711         if (!res) {
1712                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1713                 return -ENOMEM;
1714         }
1715         res->mac = mac;
1716         res->port = (u8) port;
1717         res->smac_index = smac_index;
1718         res->ref_count = 1;
1719         list_add_tail(&res->list,
1720                       &tracker->slave_list[slave].res_list[RES_MAC]);
1721         return 0;
1722 }
1723
1724
1725 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1726                                int port)
1727 {
1728         struct mlx4_priv *priv = mlx4_priv(dev);
1729         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1730         struct list_head *mac_list =
1731                 &tracker->slave_list[slave].res_list[RES_MAC];
1732         struct mac_res *res, *tmp;
1733
1734         list_for_each_entry_safe(res, tmp, mac_list, list) {
1735                 if (res->mac == mac && res->port == (u8) port) {
1736                         if (!--res->ref_count) {
1737                                 list_del(&res->list);
1738                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1739                                 kfree(res);
1740                         }
1741                         break;
1742                 }
1743         }
1744 }
1745
1746 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1747 {
1748         struct mlx4_priv *priv = mlx4_priv(dev);
1749         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1750         struct list_head *mac_list =
1751                 &tracker->slave_list[slave].res_list[RES_MAC];
1752         struct mac_res *res, *tmp;
1753         int i;
1754
1755         list_for_each_entry_safe(res, tmp, mac_list, list) {
1756                 list_del(&res->list);
1757                 /* dereference the MAC the number of times the slave referenced it */
1758                 for (i = 0; i < res->ref_count; i++)
1759                         __mlx4_unregister_mac(dev, res->port, res->mac);
1760                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1761                 kfree(res);
1762         }
1763 }
1764
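     /*
      * Register a MAC on behalf of a slave.  __mlx4_register_mac() returns
      * the MAC-table (smac) index on success; it is passed back in
      * out_param and recorded in the slave's MAC list.
      */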
1765 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1766                          u64 in_param, u64 *out_param, int in_port)
1767 {
1768         int err = -EINVAL;
1769         int port;
1770         u64 mac;
1771         u8 smac_index = 0;
1772
1773         if (op != RES_OP_RESERVE_AND_MAP)
1774                 return err;
1775
1776         port = !in_port ? get_param_l(out_param) : in_port;
1777         mac = in_param;
1778
1779         err = __mlx4_register_mac(dev, port, mac);
1780         if (err >= 0) {
1781                 smac_index = err;
1782                 set_param_l(out_param, err);
1783                 err = 0;
1784         }
1785
1786         if (!err) {
1787                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1788                 if (err)
1789                         __mlx4_unregister_mac(dev, port, mac);
1790         }
1791         return err;
1792 }
1793
1794 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1795                              int port, int vlan_index)
1796 {
1797         struct mlx4_priv *priv = mlx4_priv(dev);
1798         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1799         struct list_head *vlan_list =
1800                 &tracker->slave_list[slave].res_list[RES_VLAN];
1801         struct vlan_res *res, *tmp;
1802
1803         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1804                 if (res->vlan == vlan && res->port == (u8) port) {
1805                         /* vlan found. update ref count */
1806                         ++res->ref_count;
1807                         return 0;
1808                 }
1809         }
1810
1811         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1812                 return -EINVAL;
1813         res = kzalloc(sizeof(*res), GFP_KERNEL);
1814         if (!res) {
1815                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1816                 return -ENOMEM;
1817         }
1818         res->vlan = vlan;
1819         res->port = (u8) port;
1820         res->vlan_index = vlan_index;
1821         res->ref_count = 1;
1822         list_add_tail(&res->list,
1823                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1824         return 0;
1825 }
1826
1827
1828 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1829                                 int port)
1830 {
1831         struct mlx4_priv *priv = mlx4_priv(dev);
1832         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1833         struct list_head *vlan_list =
1834                 &tracker->slave_list[slave].res_list[RES_VLAN];
1835         struct vlan_res *res, *tmp;
1836
1837         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1838                 if (res->vlan == vlan && res->port == (u8) port) {
1839                         if (!--res->ref_count) {
1840                                 list_del(&res->list);
1841                                 mlx4_release_resource(dev, slave, RES_VLAN,
1842                                                       1, port);
1843                                 kfree(res);
1844                         }
1845                         break;
1846                 }
1847         }
1848 }
1849
1850 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1851 {
1852         struct mlx4_priv *priv = mlx4_priv(dev);
1853         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1854         struct list_head *vlan_list =
1855                 &tracker->slave_list[slave].res_list[RES_VLAN];
1856         struct vlan_res *res, *tmp;
1857         int i;
1858
1859         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1860                 list_del(&res->list);
1861                 /* dereference the VLAN the number of times the slave referenced it */
1862                 for (i = 0; i < res->ref_count; i++)
1863                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1864                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1865                 kfree(res);
1866         }
1867 }
1868
1869 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1870                           u64 in_param, u64 *out_param, int port)
1871 {
1872         int err = -EINVAL;
1873         u16 vlan;
1874         int vlan_index;
1875
1876         if (!port)
1877                 return err;
1878
1879         if (op != RES_OP_RESERVE_AND_MAP)
1880                 return err;
1881
1882         vlan = (u16) in_param;
1883
1884         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1885         if (!err) {
1886                 set_param_l(out_param, (u32) vlan_index);
1887                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1888                 if (err)
1889                         __mlx4_unregister_vlan(dev, port, vlan);
1890         }
1891         return err;
1892 }
1893
1894 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1895                              u64 in_param, u64 *out_param)
1896 {
1897         u32 index;
1898         int err;
1899
1900         if (op != RES_OP_RESERVE)
1901                 return -EINVAL;
1902
1903         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1904         if (err)
1905                 return err;
1906
1907         err = __mlx4_counter_alloc(dev, &index);
1908         if (err) {
1909                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1910                 return err;
1911         }
1912
1913         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1914         if (err) {
1915                 __mlx4_counter_free(dev, index);
1916                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1917         } else {
1918                 set_param_l(out_param, index);
1919         }
1920
1921         return err;
1922 }
1923
1924 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1925                            u64 in_param, u64 *out_param)
1926 {
1927         u32 xrcdn;
1928         int err;
1929
1930         if (op != RES_OP_RESERVE)
1931                 return -EINVAL;
1932
1933         err = __mlx4_xrcd_alloc(dev, &xrcdn);
1934         if (err)
1935                 return err;
1936
1937         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1938         if (err)
1939                 __mlx4_xrcd_free(dev, xrcdn);
1940         else
1941                 set_param_l(out_param, xrcdn);
1942
1943         return err;
1944 }
1945
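     /*
      * ALLOC_RES dispatcher: the low byte of in_modifier selects the
      * resource type, the next byte carries the port for MAC and VLAN
      * allocations, and op_modifier selects the per-type sub-operation.
      */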
1946 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1947                            struct mlx4_vhcr *vhcr,
1948                            struct mlx4_cmd_mailbox *inbox,
1949                            struct mlx4_cmd_mailbox *outbox,
1950                            struct mlx4_cmd_info *cmd)
1951 {
1952         int err;
1953         int alop = vhcr->op_modifier;
1954
1955         switch (vhcr->in_modifier & 0xFF) {
1956         case RES_QP:
1957                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1958                                    vhcr->in_param, &vhcr->out_param);
1959                 break;
1960
1961         case RES_MTT:
1962                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1963                                     vhcr->in_param, &vhcr->out_param);
1964                 break;
1965
1966         case RES_MPT:
1967                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1968                                     vhcr->in_param, &vhcr->out_param);
1969                 break;
1970
1971         case RES_CQ:
1972                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1973                                    vhcr->in_param, &vhcr->out_param);
1974                 break;
1975
1976         case RES_SRQ:
1977                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1978                                     vhcr->in_param, &vhcr->out_param);
1979                 break;
1980
1981         case RES_MAC:
1982                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1983                                     vhcr->in_param, &vhcr->out_param,
1984                                     (vhcr->in_modifier >> 8) & 0xFF);
1985                 break;
1986
1987         case RES_VLAN:
1988                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1989                                      vhcr->in_param, &vhcr->out_param,
1990                                      (vhcr->in_modifier >> 8) & 0xFF);
1991                 break;
1992
1993         case RES_COUNTER:
1994                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1995                                         vhcr->in_param, &vhcr->out_param);
1996                 break;
1997
1998         case RES_XRCD:
1999                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2000                                       vhcr->in_param, &vhcr->out_param);
2001                 break;
2002
2003         default:
2004                 err = -EINVAL;
2005                 break;
2006         }
2007
2008         return err;
2009 }
2010
2011 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2012                        u64 in_param)
2013 {
2014         int err;
2015         int count;
2016         int base;
2017         int qpn;
2018
2019         switch (op) {
2020         case RES_OP_RESERVE:
2021                 base = get_param_l(&in_param) & 0x7fffff;
2022                 count = get_param_h(&in_param);
2023                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2024                 if (err)
2025                         break;
2026                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2027                 __mlx4_qp_release_range(dev, base, count);
2028                 break;
2029         case RES_OP_MAP_ICM:
2030                 qpn = get_param_l(&in_param) & 0x7fffff;
2031                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2032                                            NULL, 0);
2033                 if (err)
2034                         return err;
2035
2036                 if (!fw_reserved(dev, qpn))
2037                         __mlx4_qp_free_icm(dev, qpn);
2038
2039                 res_end_move(dev, slave, RES_QP, qpn);
2040
2041                 if (valid_reserved(dev, slave, qpn))
2042                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2043                 break;
2044         default:
2045                 err = -EINVAL;
2046                 break;
2047         }
2048         return err;
2049 }
2050
2051 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2052                         u64 in_param, u64 *out_param)
2053 {
2054         int err = -EINVAL;
2055         int base;
2056         int order;
2057
2058         if (op != RES_OP_RESERVE_AND_MAP)
2059                 return err;
2060
2061         base = get_param_l(&in_param);
2062         order = get_param_h(&in_param);
2063         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2064         if (!err) {
2065                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2066                 __mlx4_free_mtt_range(dev, base, order);
2067         }
2068         return err;
2069 }
2070
2071 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2072                         u64 in_param)
2073 {
2074         int err = -EINVAL;
2075         int index;
2076         int id;
2077         struct res_mpt *mpt;
2078
2079         switch (op) {
2080         case RES_OP_RESERVE:
2081                 index = get_param_l(&in_param);
2082                 id = index & mpt_mask(dev);
2083                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2084                 if (err)
2085                         break;
2086                 index = mpt->key;
2087                 put_res(dev, slave, id, RES_MPT);
2088
2089                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2090                 if (err)
2091                         break;
2092                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2093                 __mlx4_mr_release(dev, index);
2094                 break;
2095         case RES_OP_MAP_ICM:
2096                 index = get_param_l(&in_param);
2097                 id = index & mpt_mask(dev);
2098                 err = mr_res_start_move_to(dev, slave, id,
2099                                            RES_MPT_RESERVED, &mpt);
2100                 if (err)
2101                         return err;
2102
2103                 __mlx4_mr_free_icm(dev, mpt->key);
2104                 res_end_move(dev, slave, RES_MPT, id);
2105                 return err;
2106                 break;
2107         default:
2108                 err = -EINVAL;
2109                 break;
2110         }
2111         return err;
2112 }
2113
2114 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2115                        u64 in_param, u64 *out_param)
2116 {
2117         int cqn;
2118         int err;
2119
2120         switch (op) {
2121         case RES_OP_RESERVE_AND_MAP:
2122                 cqn = get_param_l(&in_param);
2123                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2124                 if (err)
2125                         break;
2126
2127                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2128                 __mlx4_cq_free_icm(dev, cqn);
2129                 break;
2130
2131         default:
2132                 err = -EINVAL;
2133                 break;
2134         }
2135
2136         return err;
2137 }
2138
2139 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2140                         u64 in_param, u64 *out_param)
2141 {
2142         int srqn;
2143         int err;
2144
2145         switch (op) {
2146         case RES_OP_RESERVE_AND_MAP:
2147                 srqn = get_param_l(&in_param);
2148                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2149                 if (err)
2150                         break;
2151
2152                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2153                 __mlx4_srq_free_icm(dev, srqn);
2154                 break;
2155
2156         default:
2157                 err = -EINVAL;
2158                 break;
2159         }
2160
2161         return err;
2162 }
2163
2164 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2165                             u64 in_param, u64 *out_param, int in_port)
2166 {
2167         int port;
2168         int err = 0;
2169
2170         switch (op) {
2171         case RES_OP_RESERVE_AND_MAP:
2172                 port = !in_port ? get_param_l(out_param) : in_port;
2173                 mac_del_from_slave(dev, slave, in_param, port);
2174                 __mlx4_unregister_mac(dev, port, in_param);
2175                 break;
2176         default:
2177                 err = -EINVAL;
2178                 break;
2179         }
2180
2181         return err;
2182
2183 }
2184
2185 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2186                             u64 in_param, u64 *out_param, int port)
2187 {
2188         int err = 0;
2189
2190         switch (op) {
2191         case RES_OP_RESERVE_AND_MAP:
2192                 if (!port)
2193                         return -EINVAL;
2194                 vlan_del_from_slave(dev, slave, in_param, port);
2195                 __mlx4_unregister_vlan(dev, port, in_param);
2196                 break;
2197         default:
2198                 err = -EINVAL;
2199                 break;
2200         }
2201
2202         return err;
2203 }
2204
2205 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2206                             u64 in_param, u64 *out_param)
2207 {
2208         int index;
2209         int err;
2210
2211         if (op != RES_OP_RESERVE)
2212                 return -EINVAL;
2213
2214         index = get_param_l(&in_param);
2215         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2216         if (err)
2217                 return err;
2218
2219         __mlx4_counter_free(dev, index);
2220         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2221
2222         return err;
2223 }
2224
2225 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2226                           u64 in_param, u64 *out_param)
2227 {
2228         int xrcdn;
2229         int err;
2230
2231         if (op != RES_OP_RESERVE)
2232                 return -EINVAL;
2233
2234         xrcdn = get_param_l(&in_param);
2235         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2236         if (err)
2237                 return err;
2238
2239         __mlx4_xrcd_free(dev, xrcdn);
2240
2241         return err;
2242 }
2243
2244 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2245                           struct mlx4_vhcr *vhcr,
2246                           struct mlx4_cmd_mailbox *inbox,
2247                           struct mlx4_cmd_mailbox *outbox,
2248                           struct mlx4_cmd_info *cmd)
2249 {
2250         int err = -EINVAL;
2251         int alop = vhcr->op_modifier;
2252
2253         switch (vhcr->in_modifier & 0xFF) {
2254         case RES_QP:
2255                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2256                                   vhcr->in_param);
2257                 break;
2258
2259         case RES_MTT:
2260                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2261                                    vhcr->in_param, &vhcr->out_param);
2262                 break;
2263
2264         case RES_MPT:
2265                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2266                                    vhcr->in_param);
2267                 break;
2268
2269         case RES_CQ:
2270                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2271                                   vhcr->in_param, &vhcr->out_param);
2272                 break;
2273
2274         case RES_SRQ:
2275                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2276                                    vhcr->in_param, &vhcr->out_param);
2277                 break;
2278
2279         case RES_MAC:
2280                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2281                                    vhcr->in_param, &vhcr->out_param,
2282                                    (vhcr->in_modifier >> 8) & 0xFF);
2283                 break;
2284
2285         case RES_VLAN:
2286                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2287                                     vhcr->in_param, &vhcr->out_param,
2288                                     (vhcr->in_modifier >> 8) & 0xFF);
2289                 break;
2290
2291         case RES_COUNTER:
2292                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2293                                        vhcr->in_param, &vhcr->out_param);
2294                 break;
2295
2296         case RES_XRCD:
2297                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2298                                      vhcr->in_param, &vhcr->out_param);
                     break;
2299
2300         default:
2301                 break;
2302         }
2303         return err;
2304 }
2305
2306 /* ugly but other choices are uglier */
2307 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2308 {
2309         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2310 }
2311
2312 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2313 {
2314         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2315 }
2316
2317 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2318 {
2319         return be32_to_cpu(mpt->mtt_sz);
2320 }
2321
2322 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2323 {
2324         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2325 }
2326
2327 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2328 {
2329         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2330 }
2331
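     /*
      * Number of MTT entries a QP context requires: SQ and RQ sizes are
      * derived from the log sizes and strides (the RQ is absent for SRQ,
      * RSS and XRC QPs), the 64-byte-granular page offset is added, and
      * the result is rounded up to a power-of-two page count.
      */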
2332 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2333 {
2334         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2335         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2336         int log_sq_stride = qpc->sq_size_stride & 7;
2337         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2338         int log_rq_stride = qpc->rq_size_stride & 7;
2339         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2340         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2341         int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
2342         int sq_size;
2343         int rq_size;
2344         int total_pages;
2345         int total_mem;
2346         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2347
2348         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2349         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2350         total_mem = sq_size + rq_size;
2351         total_pages =
2352                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2353                                    page_shift);
2354
2355         return total_pages;
2356 }
2357
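     /* The window [start, start + size) must lie entirely inside the tracked MTT range. */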
2358 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2359                            int size, struct res_mtt *mtt)
2360 {
2361         int res_start = mtt->com.res_id;
2362         int res_size = (1 << mtt->order);
2363
2364         if (start < res_start || start + size > res_start + res_size)
2365                 return -EPERM;
2366         return 0;
2367 }
2368
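     /*
      * SW2HW_MPT: move the MPT into hardware ownership.  For non-physical
      * MRs the MTT range referenced by the entry is looked up and
      * validated, and a reference is taken on it once firmware accepts
      * the command.
      */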
2369 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2370                            struct mlx4_vhcr *vhcr,
2371                            struct mlx4_cmd_mailbox *inbox,
2372                            struct mlx4_cmd_mailbox *outbox,
2373                            struct mlx4_cmd_info *cmd)
2374 {
2375         int err;
2376         int index = vhcr->in_modifier;
2377         struct res_mtt *mtt;
2378         struct res_mpt *mpt;
2379         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2380         int phys;
2381         int id;
2382
2383         id = index & mpt_mask(dev);
2384         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2385         if (err)
2386                 return err;
2387
2388         phys = mr_phys_mpt(inbox->buf);
2389         if (!phys) {
2390                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2391                 if (err)
2392                         goto ex_abort;
2393
2394                 err = check_mtt_range(dev, slave, mtt_base,
2395                                       mr_get_mtt_size(inbox->buf), mtt);
2396                 if (err)
2397                         goto ex_put;
2398
2399                 mpt->mtt = mtt;
2400         }
2401
2402         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2403         if (err)
2404                 goto ex_put;
2405
2406         if (!phys) {
2407                 atomic_inc(&mtt->ref_count);
2408                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2409         }
2410
2411         res_end_move(dev, slave, RES_MPT, id);
2412         return 0;
2413
2414 ex_put:
2415         if (!phys)
2416                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2417 ex_abort:
2418         res_abort_move(dev, slave, RES_MPT, id);
2419
2420         return err;
2421 }
2422
2423 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2424                            struct mlx4_vhcr *vhcr,
2425                            struct mlx4_cmd_mailbox *inbox,
2426                            struct mlx4_cmd_mailbox *outbox,
2427                            struct mlx4_cmd_info *cmd)
2428 {
2429         int err;
2430         int index = vhcr->in_modifier;
2431         struct res_mpt *mpt;
2432         int id;
2433
2434         id = index & mpt_mask(dev);
2435         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2436         if (err)
2437                 return err;
2438
2439         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2440         if (err)
2441                 goto ex_abort;
2442
2443         if (mpt->mtt)
2444                 atomic_dec(&mpt->mtt->ref_count);
2445
2446         res_end_move(dev, slave, RES_MPT, id);
2447         return 0;
2448
2449 ex_abort:
2450         res_abort_move(dev, slave, RES_MPT, id);
2451
2452         return err;
2453 }
2454
2455 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2456                            struct mlx4_vhcr *vhcr,
2457                            struct mlx4_cmd_mailbox *inbox,
2458                            struct mlx4_cmd_mailbox *outbox,
2459                            struct mlx4_cmd_info *cmd)
2460 {
2461         int err;
2462         int index = vhcr->in_modifier;
2463         struct res_mpt *mpt;
2464         int id;
2465
2466         id = index & mpt_mask(dev);
2467         err = get_res(dev, slave, id, RES_MPT, &mpt);
2468         if (err)
2469                 return err;
2470
2471         if (mpt->com.from_state != RES_MPT_HW) {
2472                 err = -EBUSY;
2473                 goto out;
2474         }
2475
2476         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2477
2478 out:
2479         put_res(dev, slave, id, RES_MPT);
2480         return err;
2481 }
2482
2483 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2484 {
2485         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2486 }
2487
2488 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2489 {
2490         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2491 }
2492
2493 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2494 {
2495         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2496 }
2497
2498 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2499                                   struct mlx4_qp_context *context)
2500 {
2501         u32 qpn = vhcr->in_modifier & 0xffffff;
2502         u32 qkey = 0;
2503
2504         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2505                 return;
2506
2507         /* adjust qkey in qp context */
2508         context->qkey = cpu_to_be32(qkey);
2509 }
2510
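     /*
      * RST2INIT: look up and busy-lock every object the QP context points
      * at (MTT range, receive and send CQs, and the SRQ when used), fix up
      * the paravirtualized qkey and pkey index, forward the command to
      * firmware, and on success take a reference on each object.  Any
      * failure releases the locks and aborts the QP state transition.
      */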
2511 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2512                              struct mlx4_vhcr *vhcr,
2513                              struct mlx4_cmd_mailbox *inbox,
2514                              struct mlx4_cmd_mailbox *outbox,
2515                              struct mlx4_cmd_info *cmd)
2516 {
2517         int err;
2518         int qpn = vhcr->in_modifier & 0x7fffff;
2519         struct res_mtt *mtt;
2520         struct res_qp *qp;
2521         struct mlx4_qp_context *qpc = inbox->buf + 8;
2522         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2523         int mtt_size = qp_get_mtt_size(qpc);
2524         struct res_cq *rcq;
2525         struct res_cq *scq;
2526         int rcqn = qp_get_rcqn(qpc);
2527         int scqn = qp_get_scqn(qpc);
2528         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2529         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2530         struct res_srq *srq;
2531         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2532
2533         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2534         if (err)
2535                 return err;
2536         qp->local_qpn = local_qpn;
2537
2538         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2539         if (err)
2540                 goto ex_abort;
2541
2542         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2543         if (err)
2544                 goto ex_put_mtt;
2545
2546         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2547         if (err)
2548                 goto ex_put_mtt;
2549
2550         if (scqn != rcqn) {
2551                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2552                 if (err)
2553                         goto ex_put_rcq;
2554         } else
2555                 scq = rcq;
2556
2557         if (use_srq) {
2558                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2559                 if (err)
2560                         goto ex_put_scq;
2561         }
2562
2563         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2564         update_pkey_index(dev, slave, inbox);
2565         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2566         if (err)
2567                 goto ex_put_srq;
2568         atomic_inc(&mtt->ref_count);
2569         qp->mtt = mtt;
2570         atomic_inc(&rcq->ref_count);
2571         qp->rcq = rcq;
2572         atomic_inc(&scq->ref_count);
2573         qp->scq = scq;
2574
2575         if (scqn != rcqn)
2576                 put_res(dev, slave, scqn, RES_CQ);
2577
2578         if (use_srq) {
2579                 atomic_inc(&srq->ref_count);
2580                 put_res(dev, slave, srqn, RES_SRQ);
2581                 qp->srq = srq;
2582         }
2583         put_res(dev, slave, rcqn, RES_CQ);
2584         put_res(dev, slave, mtt_base, RES_MTT);
2585         res_end_move(dev, slave, RES_QP, qpn);
2586
2587         return 0;
2588
2589 ex_put_srq:
2590         if (use_srq)
2591                 put_res(dev, slave, srqn, RES_SRQ);
2592 ex_put_scq:
2593         if (scqn != rcqn)
2594                 put_res(dev, slave, scqn, RES_CQ);
2595 ex_put_rcq:
2596         put_res(dev, slave, rcqn, RES_CQ);
2597 ex_put_mtt:
2598         put_res(dev, slave, mtt_base, RES_MTT);
2599 ex_abort:
2600         res_abort_move(dev, slave, RES_QP, qpn);
2601
2602         return err;
2603 }
2604
2605 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2606 {
2607         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2608 }
2609
2610 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2611 {
2612         int log_eq_size = eqc->log_eq_size & 0x1f;
2613         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2614
2615         if (log_eq_size + 5 < page_shift)
2616                 return 1;
2617
2618         return 1 << (log_eq_size + 5 - page_shift);
2619 }
2620
2621 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2622 {
2623         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2624 }
2625
2626 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2627 {
2628         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2629         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2630
2631         if (log_cq_size + 5 < page_shift)
2632                 return 1;
2633
2634         return 1 << (log_cq_size + 5 - page_shift);
2635 }
2636
2637 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2638                           struct mlx4_vhcr *vhcr,
2639                           struct mlx4_cmd_mailbox *inbox,
2640                           struct mlx4_cmd_mailbox *outbox,
2641                           struct mlx4_cmd_info *cmd)
2642 {
2643         int err;
2644         int eqn = vhcr->in_modifier;
2645         int res_id = (slave << 8) | eqn;
2646         struct mlx4_eq_context *eqc = inbox->buf;
2647         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2648         int mtt_size = eq_get_mtt_size(eqc);
2649         struct res_eq *eq;
2650         struct res_mtt *mtt;
2651
2652         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2653         if (err)
2654                 return err;
2655         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2656         if (err)
2657                 goto out_add;
2658
2659         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2660         if (err)
2661                 goto out_move;
2662
2663         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2664         if (err)
2665                 goto out_put;
2666
2667         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2668         if (err)
2669                 goto out_put;
2670
2671         atomic_inc(&mtt->ref_count);
2672         eq->mtt = mtt;
2673         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2674         res_end_move(dev, slave, RES_EQ, res_id);
2675         return 0;
2676
2677 out_put:
2678         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2679 out_move:
2680         res_abort_move(dev, slave, RES_EQ, res_id);
2681 out_add:
2682         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2683         return err;
2684 }
2685
2686 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2687                               int len, struct res_mtt **res)
2688 {
2689         struct mlx4_priv *priv = mlx4_priv(dev);
2690         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2691         struct res_mtt *mtt;
2692         int err = -EINVAL;
2693
2694         spin_lock_irq(mlx4_tlock(dev));
2695         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2696                             com.list) {
2697                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2698                         *res = mtt;
2699                         mtt->com.from_state = mtt->com.state;
2700                         mtt->com.state = RES_MTT_BUSY;
2701                         err = 0;
2702                         break;
2703                 }
2704         }
2705         spin_unlock_irq(mlx4_tlock(dev));
2706
2707         return err;
2708 }
2709
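     /*
      * Sanity-check a QP transition mailbox from a slave: for RC/UC
      * transitions that modify the primary or alternate address path, the
      * mgid_index must be below the slave's GID count on that port (a
      * single GID per slave on IB ports).
      */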
2710 static int verify_qp_parameters(struct mlx4_dev *dev,
2711                                 struct mlx4_cmd_mailbox *inbox,
2712                                 enum qp_transition transition, u8 slave)
2713 {
2714         u32                     qp_type;
2715         struct mlx4_qp_context  *qp_ctx;
2716         enum mlx4_qp_optpar     optpar;
2717         int port;
2718         int num_gids;
2719
2720         qp_ctx  = inbox->buf + 8;
2721         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2722         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2723
2724         switch (qp_type) {
2725         case MLX4_QP_ST_RC:
2726         case MLX4_QP_ST_UC:
2727                 switch (transition) {
2728                 case QP_TRANS_INIT2RTR:
2729                 case QP_TRANS_RTR2RTS:
2730                 case QP_TRANS_RTS2RTS:
2731                 case QP_TRANS_SQD2SQD:
2732                 case QP_TRANS_SQD2RTS:
2733                         if (slave != mlx4_master_func_num(dev)) {
2734                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2735                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2736                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2737                                                 num_gids = mlx4_get_slave_num_gids(dev, slave);
2738                                         else
2739                                                 num_gids = 1;
2740                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
2741                                                 return -EINVAL;
2742                                 }
2743                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2744                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2745                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2746                                                 num_gids = mlx4_get_slave_num_gids(dev, slave);
2747                                         else
2748                                                 num_gids = 1;
2749                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
2750                                                 return -EINVAL;
2751                                 }
                             }
2752                         break;
2753                 default:
2754                         break;
2755                 }
2756
2757                 break;
2758         default:
2759                 break;
2760         }
2761
2762         return 0;
2763 }
2764
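     /*
      * WRITE_MTT from a slave is carried out in software: the target range
      * is located among the slave's tracked MTTs, the inbox page list is
      * converted to host-endian addresses, and __mlx4_write_mtt() writes
      * the entries directly.
      */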
2765 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2766                            struct mlx4_vhcr *vhcr,
2767                            struct mlx4_cmd_mailbox *inbox,
2768                            struct mlx4_cmd_mailbox *outbox,
2769                            struct mlx4_cmd_info *cmd)
2770 {
2771         struct mlx4_mtt mtt;
2772         __be64 *page_list = inbox->buf;
2773         u64 *pg_list = (u64 *)page_list;
2774         int i;
2775         struct res_mtt *rmtt = NULL;
2776         int start = be64_to_cpu(page_list[0]);
2777         int npages = vhcr->in_modifier;
2778         int err;
2779
2780         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2781         if (err)
2782                 return err;
2783
2784         /* Call the SW implementation of write_mtt:
2785          * - Prepare a dummy mtt struct
2786          * - Translate inbox contents to simple addresses in host endianness */
2787         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2788                             we don't really use it */
2789         mtt.order = 0;
2790         mtt.page_shift = 0;
2791         for (i = 0; i < npages; ++i)
2792                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2793
2794         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2795                                ((u64 *)page_list + 2));
2796
2797         if (rmtt)
2798                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2799
2800         return err;
2801 }
2802
2803 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2804                           struct mlx4_vhcr *vhcr,
2805                           struct mlx4_cmd_mailbox *inbox,
2806                           struct mlx4_cmd_mailbox *outbox,
2807                           struct mlx4_cmd_info *cmd)
2808 {
2809         int eqn = vhcr->in_modifier;
2810         int res_id = eqn | (slave << 8);
2811         struct res_eq *eq;
2812         int err;
2813
2814         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2815         if (err)
2816                 return err;
2817
2818         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2819         if (err)
2820                 goto ex_abort;
2821
2822         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2823         if (err)
2824                 goto ex_put;
2825
2826         atomic_dec(&eq->mtt->ref_count);
2827         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2828         res_end_move(dev, slave, RES_EQ, res_id);
2829         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2830
2831         return 0;
2832
2833 ex_put:
2834         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2835 ex_abort:
2836         res_abort_move(dev, slave, RES_EQ, res_id);
2837
2838         return err;
2839 }
2840
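     /*
      * Inject an asynchronous event into a slave: if the slave registered
      * an event queue for this event type, the EQE is copied into a
      * mailbox and posted through the GEN_EQE command, serialized per
      * slave by gen_eqe_mutex.
      */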
2841 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2842 {
2843         struct mlx4_priv *priv = mlx4_priv(dev);
2844         struct mlx4_slave_event_eq_info *event_eq;
2845         struct mlx4_cmd_mailbox *mailbox;
2846         u32 in_modifier = 0;
2847         int err;
2848         int res_id;
2849         struct res_eq *req;
2850
2851         if (!priv->mfunc.master.slave_state)
2852                 return -EINVAL;
2853
2854         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2855
2856         /* Create the event only if the slave is registered */
2857         if (event_eq->eqn < 0)
2858                 return 0;
2859
2860         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2861         res_id = (slave << 8) | event_eq->eqn;
2862         err = get_res(dev, slave, res_id, RES_EQ, &req);
2863         if (err)
2864                 goto unlock;
2865
2866         if (req->com.from_state != RES_EQ_HW) {
2867                 err = -EINVAL;
2868                 goto put;
2869         }
2870
2871         mailbox = mlx4_alloc_cmd_mailbox(dev);
2872         if (IS_ERR(mailbox)) {
2873                 err = PTR_ERR(mailbox);
2874                 goto put;
2875         }
2876
2877         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2878                 ++event_eq->token;
2879                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2880         }
2881
2882         memcpy(mailbox->buf, (u8 *) eqe, 28);
2883
2884         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2885
2886         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2887                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2888                        MLX4_CMD_NATIVE);
2889
2890         put_res(dev, slave, res_id, RES_EQ);
2891         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2892         mlx4_free_cmd_mailbox(dev, mailbox);
2893         return err;
2894
2895 put:
2896         put_res(dev, slave, res_id, RES_EQ);
2897
2898 unlock:
2899         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2900         return err;
2901 }
2902
2903 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2904                           struct mlx4_vhcr *vhcr,
2905                           struct mlx4_cmd_mailbox *inbox,
2906                           struct mlx4_cmd_mailbox *outbox,
2907                           struct mlx4_cmd_info *cmd)
2908 {
2909         int eqn = vhcr->in_modifier;
2910         int res_id = eqn | (slave << 8);
2911         struct res_eq *eq;
2912         int err;
2913
2914         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2915         if (err)
2916                 return err;
2917
2918         if (eq->com.from_state != RES_EQ_HW) {
2919                 err = -EINVAL;
2920                 goto ex_put;
2921         }
2922
2923         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2924
2925 ex_put:
2926         put_res(dev, slave, res_id, RES_EQ);
2927         return err;
2928 }
2929
2930 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2931                           struct mlx4_vhcr *vhcr,
2932                           struct mlx4_cmd_mailbox *inbox,
2933                           struct mlx4_cmd_mailbox *outbox,
2934                           struct mlx4_cmd_info *cmd)
2935 {
2936         int err;
2937         int cqn = vhcr->in_modifier;
2938         struct mlx4_cq_context *cqc = inbox->buf;
2939         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2940         struct res_cq *cq;
2941         struct res_mtt *mtt;
2942
2943         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2944         if (err)
2945                 return err;
2946         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2947         if (err)
2948                 goto out_move;
2949         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2950         if (err)
2951                 goto out_put;
2952         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2953         if (err)
2954                 goto out_put;
2955         atomic_inc(&mtt->ref_count);
2956         cq->mtt = mtt;
2957         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2958         res_end_move(dev, slave, RES_CQ, cqn);
2959         return 0;
2960
2961 out_put:
2962         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2963 out_move:
2964         res_abort_move(dev, slave, RES_CQ, cqn);
2965         return err;
2966 }
2967
2968 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2969                           struct mlx4_vhcr *vhcr,
2970                           struct mlx4_cmd_mailbox *inbox,
2971                           struct mlx4_cmd_mailbox *outbox,
2972                           struct mlx4_cmd_info *cmd)
2973 {
2974         int err;
2975         int cqn = vhcr->in_modifier;
2976         struct res_cq *cq;
2977
2978         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2979         if (err)
2980                 return err;
2981         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2982         if (err)
2983                 goto out_move;
2984         atomic_dec(&cq->mtt->ref_count);
2985         res_end_move(dev, slave, RES_CQ, cqn);
2986         return 0;
2987
2988 out_move:
2989         res_abort_move(dev, slave, RES_CQ, cqn);
2990         return err;
2991 }
2992
2993 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2994                           struct mlx4_vhcr *vhcr,
2995                           struct mlx4_cmd_mailbox *inbox,
2996                           struct mlx4_cmd_mailbox *outbox,
2997                           struct mlx4_cmd_info *cmd)
2998 {
2999         int cqn = vhcr->in_modifier;
3000         struct res_cq *cq;
3001         int err;
3002
3003         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3004         if (err)
3005                 return err;
3006
3007         if (cq->com.from_state != RES_CQ_HW)
3008                 goto ex_put;
3009
3010         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3011 ex_put:
3012         put_res(dev, slave, cqn, RES_CQ);
3013
3014         return err;
3015 }
3016
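     /*
      * CQ resize path of MODIFY_CQ (op_modifier 0): validate the new MTT
      * range given in the mailbox, execute the command, then move the
      * CQ's MTT reference from the original range to the new one.
      */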
3017 static int handle_resize(struct mlx4_dev *dev, int slave,
3018                          struct mlx4_vhcr *vhcr,
3019                          struct mlx4_cmd_mailbox *inbox,
3020                          struct mlx4_cmd_mailbox *outbox,
3021                          struct mlx4_cmd_info *cmd,
3022                          struct res_cq *cq)
3023 {
3024         int err;
3025         struct res_mtt *orig_mtt;
3026         struct res_mtt *mtt;
3027         struct mlx4_cq_context *cqc = inbox->buf;
3028         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3029
3030         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3031         if (err)
3032                 return err;
3033
3034         if (orig_mtt != cq->mtt) {
3035                 err = -EINVAL;
3036                 goto ex_put;
3037         }
3038
3039         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3040         if (err)
3041                 goto ex_put;
3042
3043         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3044         if (err)
3045                 goto ex_put1;
3046         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3047         if (err)
3048                 goto ex_put1;
3049         atomic_dec(&orig_mtt->ref_count);
3050         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3051         atomic_inc(&mtt->ref_count);
3052         cq->mtt = mtt;
3053         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3054         return 0;
3055
3056 ex_put1:
3057         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3058 ex_put:
3059         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3060
3061         return err;
3062
3063 }
3064
3065 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3066                            struct mlx4_vhcr *vhcr,
3067                            struct mlx4_cmd_mailbox *inbox,
3068                            struct mlx4_cmd_mailbox *outbox,
3069                            struct mlx4_cmd_info *cmd)
3070 {
3071         int cqn = vhcr->in_modifier;
3072         struct res_cq *cq;
3073         int err;
3074
3075         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3076         if (err)
3077                 return err;
3078
3079         if (cq->com.from_state != RES_CQ_HW)
3080                 goto ex_put;
3081
3082         if (vhcr->op_modifier == 0) {
3083                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3084                 goto ex_put;
3085         }
3086
3087         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3088 ex_put:
3089         put_res(dev, slave, cqn, RES_CQ);
3090
3091         return err;
3092 }
3093
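/*
 * Number of MTT entries spanned by an SRQ, derived from the log sizes
 * in the SRQ context: total queue bytes (2^log_srq_size strides of
 * 2^(log_rq_stride + 4) bytes) divided by the page size, rounded up to
 * at least one entry.
 */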
3094 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3095 {
3096         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3097         int log_rq_stride = srqc->logstride & 7;
3098         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3099
3100         if (log_srq_size + log_rq_stride + 4 < page_shift)
3101                 return 1;
3102
3103         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3104 }
3105
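/*
 * SW2HW_SRQ on behalf of a slave: check that the SRQ number in the
 * mailbox matches the one being moved, validate the MTT range backing
 * the SRQ, hand the SRQ to firmware, and record the MTT reference on
 * success.
 */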
3106 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3107                            struct mlx4_vhcr *vhcr,
3108                            struct mlx4_cmd_mailbox *inbox,
3109                            struct mlx4_cmd_mailbox *outbox,
3110                            struct mlx4_cmd_info *cmd)
3111 {
3112         int err;
3113         int srqn = vhcr->in_modifier;
3114         struct res_mtt *mtt;
3115         struct res_srq *srq;
3116         struct mlx4_srq_context *srqc = inbox->buf;
3117         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3118
3119         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3120                 return -EINVAL;
3121
3122         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3123         if (err)
3124                 return err;
3125         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3126         if (err)
3127                 goto ex_abort;
3128         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3129                               mtt);
3130         if (err)
3131                 goto ex_put_mtt;
3132
3133         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3134         if (err)
3135                 goto ex_put_mtt;
3136
3137         atomic_inc(&mtt->ref_count);
3138         srq->mtt = mtt;
3139         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3140         res_end_move(dev, slave, RES_SRQ, srqn);
3141         return 0;
3142
3143 ex_put_mtt:
3144         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3145 ex_abort:
3146         res_abort_move(dev, slave, RES_SRQ, srqn);
3147
3148         return err;
3149 }
3150
3151 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3152                            struct mlx4_vhcr *vhcr,
3153                            struct mlx4_cmd_mailbox *inbox,
3154                            struct mlx4_cmd_mailbox *outbox,
3155                            struct mlx4_cmd_info *cmd)
3156 {
3157         int err;
3158         int srqn = vhcr->in_modifier;
3159         struct res_srq *srq;
3160
3161         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3162         if (err)
3163                 return err;
3164         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3165         if (err)
3166                 goto ex_abort;
3167         atomic_dec(&srq->mtt->ref_count);
3168         if (srq->cq)
3169                 atomic_dec(&srq->cq->ref_count);
3170         res_end_move(dev, slave, RES_SRQ, srqn);
3171
3172         return 0;
3173
3174 ex_abort:
3175         res_abort_move(dev, slave, RES_SRQ, srqn);
3176
3177         return err;
3178 }
3179
3180 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3181                            struct mlx4_vhcr *vhcr,
3182                            struct mlx4_cmd_mailbox *inbox,
3183                            struct mlx4_cmd_mailbox *outbox,
3184                            struct mlx4_cmd_info *cmd)
3185 {
3186         int err;
3187         int srqn = vhcr->in_modifier;
3188         struct res_srq *srq;
3189
3190         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3191         if (err)
3192                 return err;
3193         if (srq->com.from_state != RES_SRQ_HW) {
3194                 err = -EBUSY;
3195                 goto out;
3196         }
3197         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3198 out:
3199         put_res(dev, slave, srqn, RES_SRQ);
3200         return err;
3201 }
3202
3203 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3204                          struct mlx4_vhcr *vhcr,
3205                          struct mlx4_cmd_mailbox *inbox,
3206                          struct mlx4_cmd_mailbox *outbox,
3207                          struct mlx4_cmd_info *cmd)
3208 {
3209         int err;
3210         int srqn = vhcr->in_modifier;
3211         struct res_srq *srq;
3212
3213         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3214         if (err)
3215                 return err;
3216
3217         if (srq->com.from_state != RES_SRQ_HW) {
3218                 err = -EBUSY;
3219                 goto out;
3220         }
3221
3222         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3223 out:
3224         put_res(dev, slave, srqn, RES_SRQ);
3225         return err;
3226 }
3227
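/*
 * Generic QP command wrapper: forward the command to firmware only if
 * the QP belongs to this slave and is currently in hardware ownership
 * (RES_QP_HW); otherwise fail with -EBUSY.
 */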
3228 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3229                         struct mlx4_vhcr *vhcr,
3230                         struct mlx4_cmd_mailbox *inbox,
3231                         struct mlx4_cmd_mailbox *outbox,
3232                         struct mlx4_cmd_info *cmd)
3233 {
3234         int err;
3235         int qpn = vhcr->in_modifier & 0x7fffff;
3236         struct res_qp *qp;
3237
3238         err = get_res(dev, slave, qpn, RES_QP, &qp);
3239         if (err)
3240                 return err;
3241         if (qp->com.from_state != RES_QP_HW) {
3242                 err = -EBUSY;
3243                 goto out;
3244         }
3245
3246         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3247 out:
3248         put_res(dev, slave, qpn, RES_QP);
3249         return err;
3250 }
3251
3252 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3253                               struct mlx4_vhcr *vhcr,
3254                               struct mlx4_cmd_mailbox *inbox,
3255                               struct mlx4_cmd_mailbox *outbox,
3256                               struct mlx4_cmd_info *cmd)
3257 {
3258         struct mlx4_qp_context *context = inbox->buf + 8;
3259         adjust_proxy_tun_qkey(dev, vhcr, context);
3260         update_pkey_index(dev, slave, inbox);
3261         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3262 }
3263
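/*
 * For RoCE (Ethernet) QPs, make sure the source MAC index programmed
 * into the QP context refers to a MAC that is actually registered to
 * this slave on that port.
 */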
3264 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3265                                 struct mlx4_qp_context *qpc,
3266                                 struct mlx4_cmd_mailbox *inbox)
3267 {
3268         u64 mac;
3269         int port;
3270         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3271         u8 sched = *(u8 *)(inbox->buf + 64);
3272         u8 smac_ix;
3273
3274         port = ((sched >> 6) & 1) + 1;
3275         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3276                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3277                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3278                         return -ENOENT;
3279         }
3280         return 0;
3281 }
3282
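/*
 * INIT2RTR is the transition where most slave-visible addressing is
 * fixed up by the master: the QP parameters are sanity checked, the
 * RoCE source MAC is verified, and the pkey index, GID index, proxy
 * tunnel qkey and per-vport QP parameters are rewritten before the
 * command is forwarded to firmware.
 */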
3283 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3284                              struct mlx4_vhcr *vhcr,
3285                              struct mlx4_cmd_mailbox *inbox,
3286                              struct mlx4_cmd_mailbox *outbox,
3287                              struct mlx4_cmd_info *cmd)
3288 {
3289         int err;
3290         struct mlx4_qp_context *qpc = inbox->buf + 8;
3291
3292         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3293         if (err)
3294                 return err;
3295
3296         if (roce_verify_mac(dev, slave, qpc, inbox))
3297                 return -EINVAL;
3298
3299         update_pkey_index(dev, slave, inbox);
3300         update_gid(dev, inbox, (u8)slave);
3301         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3302         err = update_vport_qp_param(dev, inbox, slave);
3303         if (err)
3304                 return err;
3305
3306         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3307 }
3308
3309 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3310                             struct mlx4_vhcr *vhcr,
3311                             struct mlx4_cmd_mailbox *inbox,
3312                             struct mlx4_cmd_mailbox *outbox,
3313                             struct mlx4_cmd_info *cmd)
3314 {
3315         int err;
3316         struct mlx4_qp_context *context = inbox->buf + 8;
3317
3318         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3319         if (err)
3320                 return err;
3321
3322         update_pkey_index(dev, slave, inbox);
3323         update_gid(dev, inbox, (u8)slave);
3324         adjust_proxy_tun_qkey(dev, vhcr, context);
3325         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3326 }
3327
3328 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3329                             struct mlx4_vhcr *vhcr,
3330                             struct mlx4_cmd_mailbox *inbox,
3331                             struct mlx4_cmd_mailbox *outbox,
3332                             struct mlx4_cmd_info *cmd)
3333 {
3334         int err;
3335         struct mlx4_qp_context *context = inbox->buf + 8;
3336
3337         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3338         if (err)
3339                 return err;
3340
3341         update_pkey_index(dev, slave, inbox);
3342         update_gid(dev, inbox, (u8)slave);
3343         adjust_proxy_tun_qkey(dev, vhcr, context);
3344         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3345 }
3346
3347
3348 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3349                               struct mlx4_vhcr *vhcr,
3350                               struct mlx4_cmd_mailbox *inbox,
3351                               struct mlx4_cmd_mailbox *outbox,
3352                               struct mlx4_cmd_info *cmd)
3353 {
3354         struct mlx4_qp_context *context = inbox->buf + 8;
3355         adjust_proxy_tun_qkey(dev, vhcr, context);
3356         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3357 }
3358
3359 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3360                             struct mlx4_vhcr *vhcr,
3361                             struct mlx4_cmd_mailbox *inbox,
3362                             struct mlx4_cmd_mailbox *outbox,
3363                             struct mlx4_cmd_info *cmd)
3364 {
3365         int err;
3366         struct mlx4_qp_context *context = inbox->buf + 8;
3367
3368         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3369         if (err)
3370                 return err;
3371
3372         adjust_proxy_tun_qkey(dev, vhcr, context);
3373         update_gid(dev, inbox, (u8)slave);
3374         update_pkey_index(dev, slave, inbox);
3375         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3376 }
3377
3378 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3379                             struct mlx4_vhcr *vhcr,
3380                             struct mlx4_cmd_mailbox *inbox,
3381                             struct mlx4_cmd_mailbox *outbox,
3382                             struct mlx4_cmd_info *cmd)
3383 {
3384         int err;
3385         struct mlx4_qp_context *context = inbox->buf + 8;
3386
3387         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3388         if (err)
3389                 return err;
3390
3391         adjust_proxy_tun_qkey(dev, vhcr, context);
3392         update_gid(dev, inbox, (u8)slave);
3393         update_pkey_index(dev, slave, inbox);
3394         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3395 }
3396
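/*
 * Any-state-to-RESET transition: move the QP back to RES_QP_MAPPED
 * and, once firmware has accepted the command, drop the references the
 * QP held on its MTT, receive/send CQs and (optionally) SRQ.
 */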
3397 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3398                          struct mlx4_vhcr *vhcr,
3399                          struct mlx4_cmd_mailbox *inbox,
3400                          struct mlx4_cmd_mailbox *outbox,
3401                          struct mlx4_cmd_info *cmd)
3402 {
3403         int err;
3404         int qpn = vhcr->in_modifier & 0x7fffff;
3405         struct res_qp *qp;
3406
3407         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3408         if (err)
3409                 return err;
3410         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3411         if (err)
3412                 goto ex_abort;
3413
3414         atomic_dec(&qp->mtt->ref_count);
3415         atomic_dec(&qp->rcq->ref_count);
3416         atomic_dec(&qp->scq->ref_count);
3417         if (qp->srq)
3418                 atomic_dec(&qp->srq->ref_count);
3419         res_end_move(dev, slave, RES_QP, qpn);
3420         return 0;
3421
3422 ex_abort:
3423         res_abort_move(dev, slave, RES_QP, qpn);
3424
3425         return err;
3426 }
3427
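/*
 * Multicast group bookkeeping: every GID a slave attaches to a QP is
 * recorded on the QP's mcg_list (under mcg_spl) so that duplicate
 * attachments can be rejected and so the groups can be detached again
 * when the slave's resources are torn down.
 */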
3428 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3429                                 struct res_qp *rqp, u8 *gid)
3430 {
3431         struct res_gid *res;
3432
3433         list_for_each_entry(res, &rqp->mcg_list, list) {
3434                 if (!memcmp(res->gid, gid, 16))
3435                         return res;
3436         }
3437         return NULL;
3438 }
3439
3440 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3441                        u8 *gid, enum mlx4_protocol prot,
3442                        enum mlx4_steer_type steer)
3443 {
3444         struct res_gid *res;
3445         int err;
3446
3447         res = kzalloc(sizeof *res, GFP_KERNEL);
3448         if (!res)
3449                 return -ENOMEM;
3450
3451         spin_lock_irq(&rqp->mcg_spl);
3452         if (find_gid(dev, slave, rqp, gid)) {
3453                 kfree(res);
3454                 err = -EEXIST;
3455         } else {
3456                 memcpy(res->gid, gid, 16);
3457                 res->prot = prot;
3458                 res->steer = steer;
3459                 list_add_tail(&res->list, &rqp->mcg_list);
3460                 err = 0;
3461         }
3462         spin_unlock_irq(&rqp->mcg_spl);
3463
3464         return err;
3465 }
3466
3467 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3468                        u8 *gid, enum mlx4_protocol prot,
3469                        enum mlx4_steer_type steer)
3470 {
3471         struct res_gid *res;
3472         int err;
3473
3474         spin_lock_irq(&rqp->mcg_spl);
3475         res = find_gid(dev, slave, rqp, gid);
3476         if (!res || res->prot != prot || res->steer != steer)
3477                 err = -EINVAL;
3478         else {
3479                 list_del(&res->list);
3480                 kfree(res);
3481                 err = 0;
3482         }
3483         spin_unlock_irq(&rqp->mcg_spl);
3484
3485         return err;
3486 }
3487
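/*
 * Attach or detach a QP to/from a multicast group on behalf of a slave
 * (op_modifier selects attach vs. detach).  The protocol, loopback
 * blocking and steering type are decoded from the in_modifier and the
 * GID, the attachment is tracked via add_mcg_res()/rem_mcg_res(), and
 * the actual steering update is done by the common attach/detach
 * helpers.
 */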
3488 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3489                                struct mlx4_vhcr *vhcr,
3490                                struct mlx4_cmd_mailbox *inbox,
3491                                struct mlx4_cmd_mailbox *outbox,
3492                                struct mlx4_cmd_info *cmd)
3493 {
3494         struct mlx4_qp qp; /* dummy for calling attach/detach */
3495         u8 *gid = inbox->buf;
3496         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3497         int err;
3498         int qpn;
3499         struct res_qp *rqp;
3500         int attach = vhcr->op_modifier;
3501         int block_loopback = vhcr->in_modifier >> 31;
3502         u8 steer_type_mask = 2;
3503         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3504
3505         qpn = vhcr->in_modifier & 0xffffff;
3506         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3507         if (err)
3508                 return err;
3509
3510         qp.qpn = qpn;
3511         if (attach) {
3512                 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
3513                 if (err)
3514                         goto ex_put;
3515
3516                 err = mlx4_qp_attach_common(dev, &qp, gid,
3517                                             block_loopback, prot, type);
3518                 if (err)
3519                         goto ex_rem;
3520         } else {
3521                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
3522                 if (err)
3523                         goto ex_put;
3524                 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
3525         }
3526
3527         put_res(dev, slave, qpn, RES_QP);
3528         return 0;
3529
3530 ex_rem:
3531         /* ignore error return below, already in error */
3532         (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
3533 ex_put:
3534         put_res(dev, slave, qpn, RES_QP);
3535
3536         return err;
3537 }
3538
3539 /*
3540  * MAC validation for Flow Steering rules.
3541  * VF can attach rules only with a mac address which is assigned to it.
3542  */
3543
3544 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3545                                    struct list_head *rlist)
3546 {
3547         struct mac_res *res, *tmp;
3548         __be64 be_mac;
3549
3550         /* make sure it isn't a multicast or broadcast MAC */
3551         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3552             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3553                 list_for_each_entry_safe(res, tmp, rlist, list) {
3554                         be_mac = cpu_to_be64(res->mac << 16);
3555                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3556                                 return 0;
3557                 }
3558                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3559                        eth_header->eth.dst_mac, slave);
3560                 return -EINVAL;
3561         }
3562         return 0;
3563 }
3564
3565 /*
3566  * In case of missing eth header, append eth header with a MAC address
3567  * assigned to the VF.
3568  */
3569 static int add_eth_header(struct mlx4_dev *dev, int slave,
3570                           struct mlx4_cmd_mailbox *inbox,
3571                           struct list_head *rlist, int header_id)
3572 {
3573         struct mac_res *res, *tmp;
3574         u8 port;
3575         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3576         struct mlx4_net_trans_rule_hw_eth *eth_header;
3577         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3578         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3579         __be64 be_mac = 0;
3580         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3581
3582         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3583         port = ctrl->port;
3584         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3585
3586         /* Clear a space in the inbox for eth header */
3587         switch (header_id) {
3588         case MLX4_NET_TRANS_RULE_ID_IPV4:
3589                 ip_header =
3590                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3591                 memmove(ip_header, eth_header,
3592                         sizeof(*ip_header) + sizeof(*l4_header));
3593                 break;
3594         case MLX4_NET_TRANS_RULE_ID_TCP:
3595         case MLX4_NET_TRANS_RULE_ID_UDP:
3596                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3597                             (eth_header + 1);
3598                 memmove(l4_header, eth_header, sizeof(*l4_header));
3599                 break;
3600         default:
3601                 return -EINVAL;
3602         }
3603         list_for_each_entry_safe(res, tmp, rlist, list) {
3604                 if (port == res->port) {
3605                         be_mac = cpu_to_be64(res->mac << 16);
3606                         break;
3607                 }
3608         }
3609         if (!be_mac) {
3610                 pr_err("Failed adding eth header to FS rule, can't find a matching MAC for port %d\n",
3611                        port);
3612                 return -EINVAL;
3613         }
3614
3615         memset(eth_header, 0, sizeof(*eth_header));
3616         eth_header->size = sizeof(*eth_header) >> 2;
3617         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3618         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3619         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3620
3621         return 0;
3622
3623 }
3624
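/*
 * Attach a device-managed flow steering rule for a slave.  Rules with
 * an Ethernet header must carry a destination MAC owned by the slave;
 * rules that start at L3/L4 get an Ethernet header with one of the
 * slave's MACs prepended.  The resulting rule handle is registered in
 * the resource tracker so it can be detached when the slave goes away.
 */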
3625 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3626                                          struct mlx4_vhcr *vhcr,
3627                                          struct mlx4_cmd_mailbox *inbox,
3628                                          struct mlx4_cmd_mailbox *outbox,
3629                                          struct mlx4_cmd_info *cmd)
3630 {
3631
3632         struct mlx4_priv *priv = mlx4_priv(dev);
3633         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3634         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3635         int err;
3636         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3637         struct _rule_hw  *rule_header;
3638         int header_id;
3639
3640         if (dev->caps.steering_mode !=
3641             MLX4_STEERING_MODE_DEVICE_MANAGED)
3642                 return -EOPNOTSUPP;
3643
3644         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3645         rule_header = (struct _rule_hw *)(ctrl + 1);
3646         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3647
3648         switch (header_id) {
3649         case MLX4_NET_TRANS_RULE_ID_ETH:
3650                 if (validate_eth_header_mac(slave, rule_header, rlist))
3651                         return -EINVAL;
3652                 break;
3653         case MLX4_NET_TRANS_RULE_ID_IB:
3654                 break;
3655         case MLX4_NET_TRANS_RULE_ID_IPV4:
3656         case MLX4_NET_TRANS_RULE_ID_TCP:
3657         case MLX4_NET_TRANS_RULE_ID_UDP:
3658                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3659                 if (add_eth_header(dev, slave, inbox, rlist, header_id))
3660                         return -EINVAL;
3661                 vhcr->in_modifier +=
3662                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3663                 break;
3664         default:
3665                 pr_err("Corrupted mailbox.\n");
3666                 return -EINVAL;
3667         }
3668
3669         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3670                            vhcr->in_modifier, 0,
3671                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3672                            MLX4_CMD_NATIVE);
3673         if (err)
3674                 return err;
3675
3676         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3677         if (err) {
3678                 mlx4_err(dev, "Failed to add flow steering resources\n");
3679                 /* detach the rule that was just attached */
3680                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3681                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3682                          MLX4_CMD_NATIVE);
3683         }
3684         return err;
3685 }
3686
3687 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3688                                          struct mlx4_vhcr *vhcr,
3689                                          struct mlx4_cmd_mailbox *inbox,
3690                                          struct mlx4_cmd_mailbox *outbox,
3691                                          struct mlx4_cmd_info *cmd)
3692 {
3693         int err;
3694
3695         if (dev->caps.steering_mode !=
3696             MLX4_STEERING_MODE_DEVICE_MANAGED)
3697                 return -EOPNOTSUPP;
3698
3699         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3700         if (err) {
3701                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3702                 return err;
3703         }
3704
3705         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3706                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3707                        MLX4_CMD_NATIVE);
3708         return err;
3709 }
3710
3711 enum {
3712         BUSY_MAX_RETRIES = 10
3713 };
3714
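/*
 * QUERY_IF_STAT: only forward the query if the counter index named in
 * the in_modifier is a counter owned by this slave.
 */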
3715 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3716                                struct mlx4_vhcr *vhcr,
3717                                struct mlx4_cmd_mailbox *inbox,
3718                                struct mlx4_cmd_mailbox *outbox,
3719                                struct mlx4_cmd_info *cmd)
3720 {
3721         int err;
3722         int index = vhcr->in_modifier & 0xffff;
3723
3724         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3725         if (err)
3726                 return err;
3727
3728         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3729         put_res(dev, slave, index, RES_COUNTER);
3730         return err;
3731 }
3732
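/*
 * Detach every multicast group still attached to the QP (using the
 * mcg_list recorded at attach time) and free the tracking entries.
 */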
3733 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3734 {
3735         struct res_gid *rgid;
3736         struct res_gid *tmp;
3737         struct mlx4_qp qp; /* dummy for calling attach/detach */
3738
3739         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3740                 qp.qpn = rqp->local_qpn;
3741                 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3742                                              rgid->steer);
3743                 list_del(&rgid->list);
3744                 kfree(rgid);
3745         }
3746 }
3747
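/*
 * Mark every resource of the given type owned by the slave as busy so
 * that no command wrapper can grab it while it is being destroyed.
 * Returns the number of resources that could not be marked because
 * they were already busy; move_all_busy() retries for up to 5 seconds
 * before giving up and logging the stragglers.
 */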
3748 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3749                           enum mlx4_resource type, int print)
3750 {
3751         struct mlx4_priv *priv = mlx4_priv(dev);
3752         struct mlx4_resource_tracker *tracker =
3753                 &priv->mfunc.master.res_tracker;
3754         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3755         struct res_common *r;
3756         struct res_common *tmp;
3757         int busy;
3758
3759         busy = 0;
3760         spin_lock_irq(mlx4_tlock(dev));
3761         list_for_each_entry_safe(r, tmp, rlist, list) {
3762                 if (r->owner == slave) {
3763                         if (!r->removing) {
3764                                 if (r->state == RES_ANY_BUSY) {
3765                                         if (print)
3766                                                 mlx4_dbg(dev,
3767                                                          "%s id 0x%llx is busy\n",
3768                                                           ResourceType(type),
3769                                                           r->res_id);
3770                                         ++busy;
3771                                 } else {
3772                                         r->from_state = r->state;
3773                                         r->state = RES_ANY_BUSY;
3774                                         r->removing = 1;
3775                                 }
3776                         }
3777                 }
3778         }
3779         spin_unlock_irq(mlx4_tlock(dev));
3780
3781         return busy;
3782 }
3783
3784 static int move_all_busy(struct mlx4_dev *dev, int slave,
3785                          enum mlx4_resource type)
3786 {
3787         unsigned long begin;
3788         int busy;
3789
3790         begin = jiffies;
3791         do {
3792                 busy = _move_all_busy(dev, slave, type, 0);
3793                 if (time_after(jiffies, begin + 5 * HZ))
3794                         break;
3795                 if (busy)
3796                         cond_resched();
3797         } while (busy);
3798
3799         if (busy)
3800                 busy = _move_all_busy(dev, slave, type, 1);
3801
3802         return busy;
3803 }
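
/*
 * The rem_slave_*() functions below all follow the same pattern: mark
 * the slave's resources busy, then walk each resource back from its
 * current state to the free state one step at a time (e.g. RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED), issuing the firmware command and
 * dropping the reference counts that each step requires, and finally
 * remove the tracking entry.
 */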
3804 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3805 {
3806         struct mlx4_priv *priv = mlx4_priv(dev);
3807         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3808         struct list_head *qp_list =
3809                 &tracker->slave_list[slave].res_list[RES_QP];
3810         struct res_qp *qp;
3811         struct res_qp *tmp;
3812         int state;
3813         u64 in_param;
3814         int qpn;
3815         int err;
3816
3817         err = move_all_busy(dev, slave, RES_QP);
3818         if (err)
3819                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3820                           "for slave %d\n", slave);
3821
3822         spin_lock_irq(mlx4_tlock(dev));
3823         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3824                 spin_unlock_irq(mlx4_tlock(dev));
3825                 if (qp->com.owner == slave) {
3826                         qpn = qp->com.res_id;
3827                         detach_qp(dev, slave, qp);
3828                         state = qp->com.from_state;
3829                         while (state != 0) {
3830                                 switch (state) {
3831                                 case RES_QP_RESERVED:
3832                                         spin_lock_irq(mlx4_tlock(dev));
3833                                         rb_erase(&qp->com.node,
3834                                                  &tracker->res_tree[RES_QP]);
3835                                         list_del(&qp->com.list);
3836                                         spin_unlock_irq(mlx4_tlock(dev));
3837                                         kfree(qp);
3838                                         state = 0;
3839                                         break;
3840                                 case RES_QP_MAPPED:
3841                                         if (!valid_reserved(dev, slave, qpn))
3842                                                 __mlx4_qp_free_icm(dev, qpn);
3843                                         state = RES_QP_RESERVED;
3844                                         break;
3845                                 case RES_QP_HW:
3846                                         in_param = slave;
3847                                         err = mlx4_cmd(dev, in_param,
3848                                                        qp->local_qpn, 2,
3849                                                        MLX4_CMD_2RST_QP,
3850                                                        MLX4_CMD_TIME_CLASS_A,
3851                                                        MLX4_CMD_NATIVE);
3852                                         if (err)
3853                                                 mlx4_dbg(dev, "rem_slave_qps: failed"
3854                                                          " to move slave %d qpn %d to"
3855                                                          " reset\n", slave,
3856                                                          qp->local_qpn);
3857                                         atomic_dec(&qp->rcq->ref_count);
3858                                         atomic_dec(&qp->scq->ref_count);
3859                                         atomic_dec(&qp->mtt->ref_count);
3860                                         if (qp->srq)
3861                                                 atomic_dec(&qp->srq->ref_count);
3862                                         state = RES_QP_MAPPED;
3863                                         break;
3864                                 default:
3865                                         state = 0;
3866                                 }
3867                         }
3868                 }
3869                 spin_lock_irq(mlx4_tlock(dev));
3870         }
3871         spin_unlock_irq(mlx4_tlock(dev));
3872 }
3873
3874 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3875 {
3876         struct mlx4_priv *priv = mlx4_priv(dev);
3877         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3878         struct list_head *srq_list =
3879                 &tracker->slave_list[slave].res_list[RES_SRQ];
3880         struct res_srq *srq;
3881         struct res_srq *tmp;
3882         int state;
3883         u64 in_param;
3884         LIST_HEAD(tlist);
3885         int srqn;
3886         int err;
3887
3888         err = move_all_busy(dev, slave, RES_SRQ);
3889         if (err)
3890                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3891                           "busy for slave %d\n", slave);
3892
3893         spin_lock_irq(mlx4_tlock(dev));
3894         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3895                 spin_unlock_irq(mlx4_tlock(dev));
3896                 if (srq->com.owner == slave) {
3897                         srqn = srq->com.res_id;
3898                         state = srq->com.from_state;
3899                         while (state != 0) {
3900                                 switch (state) {
3901                                 case RES_SRQ_ALLOCATED:
3902                                         __mlx4_srq_free_icm(dev, srqn);
3903                                         spin_lock_irq(mlx4_tlock(dev));
3904                                         rb_erase(&srq->com.node,
3905                                                  &tracker->res_tree[RES_SRQ]);
3906                                         list_del(&srq->com.list);
3907                                         spin_unlock_irq(mlx4_tlock(dev));
3908                                         kfree(srq);
3909                                         state = 0;
3910                                         break;
3911
3912                                 case RES_SRQ_HW:
3913                                         in_param = slave;
3914                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3915                                                        MLX4_CMD_HW2SW_SRQ,
3916                                                        MLX4_CMD_TIME_CLASS_A,
3917                                                        MLX4_CMD_NATIVE);
3918                                         if (err)
3919                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
3920                                                          " to move slave %d srq %d to"
3921                                                          " SW ownership\n",
3922                                                          slave, srqn);
3923
3924                                         atomic_dec(&srq->mtt->ref_count);
3925                                         if (srq->cq)
3926                                                 atomic_dec(&srq->cq->ref_count);
3927                                         state = RES_SRQ_ALLOCATED;
3928                                         break;
3929
3930                                 default:
3931                                         state = 0;
3932                                 }
3933                         }
3934                 }
3935                 spin_lock_irq(mlx4_tlock(dev));
3936         }
3937         spin_unlock_irq(mlx4_tlock(dev));
3938 }
3939
3940 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3941 {
3942         struct mlx4_priv *priv = mlx4_priv(dev);
3943         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3944         struct list_head *cq_list =
3945                 &tracker->slave_list[slave].res_list[RES_CQ];
3946         struct res_cq *cq;
3947         struct res_cq *tmp;
3948         int state;
3949         u64 in_param;
3950         LIST_HEAD(tlist);
3951         int cqn;
3952         int err;
3953
3954         err = move_all_busy(dev, slave, RES_CQ);
3955         if (err)
3956                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3957                           "busy for slave %d\n", slave);
3958
3959         spin_lock_irq(mlx4_tlock(dev));
3960         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3961                 spin_unlock_irq(mlx4_tlock(dev));
3962                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3963                         cqn = cq->com.res_id;
3964                         state = cq->com.from_state;
3965                         while (state != 0) {
3966                                 switch (state) {
3967                                 case RES_CQ_ALLOCATED:
3968                                         __mlx4_cq_free_icm(dev, cqn);
3969                                         spin_lock_irq(mlx4_tlock(dev));
3970                                         rb_erase(&cq->com.node,
3971                                                  &tracker->res_tree[RES_CQ]);
3972                                         list_del(&cq->com.list);
3973                                         spin_unlock_irq(mlx4_tlock(dev));
3974                                         kfree(cq);
3975                                         state = 0;
3976                                         break;
3977
3978                                 case RES_CQ_HW:
3979                                         in_param = slave;
3980                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3981                                                        MLX4_CMD_HW2SW_CQ,
3982                                                        MLX4_CMD_TIME_CLASS_A,
3983                                                        MLX4_CMD_NATIVE);
3984                                         if (err)
3985                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
3986                                                          " to move slave %d cq %d to"
3987                                                          " SW ownership\n",
3988                                                          slave, cqn);
3989                                         atomic_dec(&cq->mtt->ref_count);
3990                                         state = RES_CQ_ALLOCATED;
3991                                         break;
3992
3993                                 default:
3994                                         state = 0;
3995                                 }
3996                         }
3997                 }
3998                 spin_lock_irq(mlx4_tlock(dev));
3999         }
4000         spin_unlock_irq(mlx4_tlock(dev));
4001 }
4002
4003 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4004 {
4005         struct mlx4_priv *priv = mlx4_priv(dev);
4006         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4007         struct list_head *mpt_list =
4008                 &tracker->slave_list[slave].res_list[RES_MPT];
4009         struct res_mpt *mpt;
4010         struct res_mpt *tmp;
4011         int state;
4012         u64 in_param;
4013         LIST_HEAD(tlist);
4014         int mptn;
4015         int err;
4016
4017         err = move_all_busy(dev, slave, RES_MPT);
4018         if (err)
4019                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
4020                           "busy for slave %d\n", slave);
4021
4022         spin_lock_irq(mlx4_tlock(dev));
4023         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4024                 spin_unlock_irq(mlx4_tlock(dev));
4025                 if (mpt->com.owner == slave) {
4026                         mptn = mpt->com.res_id;
4027                         state = mpt->com.from_state;
4028                         while (state != 0) {
4029                                 switch (state) {
4030                                 case RES_MPT_RESERVED:
4031                                         __mlx4_mr_release(dev, mpt->key);
4032                                         spin_lock_irq(mlx4_tlock(dev));
4033                                         rb_erase(&mpt->com.node,
4034                                                  &tracker->res_tree[RES_MPT]);
4035                                         list_del(&mpt->com.list);
4036                                         spin_unlock_irq(mlx4_tlock(dev));
4037                                         kfree(mpt);
4038                                         state = 0;
4039                                         break;
4040
4041                                 case RES_MPT_MAPPED:
4042                                         __mlx4_mr_free_icm(dev, mpt->key);
4043                                         state = RES_MPT_RESERVED;
4044                                         break;
4045
4046                                 case RES_MPT_HW:
4047                                         in_param = slave;
4048                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4049                                                      MLX4_CMD_HW2SW_MPT,
4050                                                      MLX4_CMD_TIME_CLASS_A,
4051                                                      MLX4_CMD_NATIVE);
4052                                         if (err)
4053                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
4054                                                          " to move slave %d mpt %d to"
4055                                                          " SW ownership\n",
4056                                                          slave, mptn);
4057                                         if (mpt->mtt)
4058                                                 atomic_dec(&mpt->mtt->ref_count);
4059                                         state = RES_MPT_MAPPED;
4060                                         break;
4061                                 default:
4062                                         state = 0;
4063                                 }
4064                         }
4065                 }
4066                 spin_lock_irq(mlx4_tlock(dev));
4067         }
4068         spin_unlock_irq(mlx4_tlock(dev));
4069 }
4070
4071 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4072 {
4073         struct mlx4_priv *priv = mlx4_priv(dev);
4074         struct mlx4_resource_tracker *tracker =
4075                 &priv->mfunc.master.res_tracker;
4076         struct list_head *mtt_list =
4077                 &tracker->slave_list[slave].res_list[RES_MTT];
4078         struct res_mtt *mtt;
4079         struct res_mtt *tmp;
4080         int state;
4081         LIST_HEAD(tlist);
4082         int base;
4083         int err;
4084
4085         err = move_all_busy(dev, slave, RES_MTT);
4086         if (err)
4087                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
4088                           "busy for slave %d\n", slave);
4089
4090         spin_lock_irq(mlx4_tlock(dev));
4091         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4092                 spin_unlock_irq(mlx4_tlock(dev));
4093                 if (mtt->com.owner == slave) {
4094                         base = mtt->com.res_id;
4095                         state = mtt->com.from_state;
4096                         while (state != 0) {
4097                                 switch (state) {
4098                                 case RES_MTT_ALLOCATED:
4099                                         __mlx4_free_mtt_range(dev, base,
4100                                                               mtt->order);
4101                                         spin_lock_irq(mlx4_tlock(dev));
4102                                         rb_erase(&mtt->com.node,
4103                                                  &tracker->res_tree[RES_MTT]);
4104                                         list_del(&mtt->com.list);
4105                                         spin_unlock_irq(mlx4_tlock(dev));
4106                                         kfree(mtt);
4107                                         state = 0;
4108                                         break;
4109
4110                                 default:
4111                                         state = 0;
4112                                 }
4113                         }
4114                 }
4115                 spin_lock_irq(mlx4_tlock(dev));
4116         }
4117         spin_unlock_irq(mlx4_tlock(dev));
4118 }
4119
4120 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4121 {
4122         struct mlx4_priv *priv = mlx4_priv(dev);
4123         struct mlx4_resource_tracker *tracker =
4124                 &priv->mfunc.master.res_tracker;
4125         struct list_head *fs_rule_list =
4126                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4127         struct res_fs_rule *fs_rule;
4128         struct res_fs_rule *tmp;
4129         int state;
4130         u64 base;
4131         int err;
4132
4133         err = move_all_busy(dev, slave, RES_FS_RULE);
4134         if (err)
4135                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4136                           slave);
4137
4138         spin_lock_irq(mlx4_tlock(dev));
4139         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4140                 spin_unlock_irq(mlx4_tlock(dev));
4141                 if (fs_rule->com.owner == slave) {
4142                         base = fs_rule->com.res_id;
4143                         state = fs_rule->com.from_state;
4144                         while (state != 0) {
4145                                 switch (state) {
4146                                 case RES_FS_RULE_ALLOCATED:
4147                                         /* detach rule */
4148                                         err = mlx4_cmd(dev, base, 0, 0,
4149                                                        MLX4_QP_FLOW_STEERING_DETACH,
4150                                                        MLX4_CMD_TIME_CLASS_A,
4151                                                        MLX4_CMD_NATIVE);
4152
4153                                         spin_lock_irq(mlx4_tlock(dev));
4154                                         rb_erase(&fs_rule->com.node,
4155                                                  &tracker->res_tree[RES_FS_RULE]);
4156                                         list_del(&fs_rule->com.list);
4157                                         spin_unlock_irq(mlx4_tlock(dev));
4158                                         kfree(fs_rule);
4159                                         state = 0;
4160                                         break;
4161
4162                                 default:
4163                                         state = 0;
4164                                 }
4165                         }
4166                 }
4167                 spin_lock_irq(mlx4_tlock(dev));
4168         }
4169         spin_unlock_irq(mlx4_tlock(dev));
4170 }
4171
4172 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4173 {
4174         struct mlx4_priv *priv = mlx4_priv(dev);
4175         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4176         struct list_head *eq_list =
4177                 &tracker->slave_list[slave].res_list[RES_EQ];
4178         struct res_eq *eq;
4179         struct res_eq *tmp;
4180         int err;
4181         int state;
4182         LIST_HEAD(tlist);
4183         int eqn;
4184         struct mlx4_cmd_mailbox *mailbox;
4185
4186         err = move_all_busy(dev, slave, RES_EQ);
4187         if (err)
4188                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
4189                           "busy for slave %d\n", slave);
4190
4191         spin_lock_irq(mlx4_tlock(dev));
4192         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4193                 spin_unlock_irq(mlx4_tlock(dev));
4194                 if (eq->com.owner == slave) {
4195                         eqn = eq->com.res_id;
4196                         state = eq->com.from_state;
4197                         while (state != 0) {
4198                                 switch (state) {
4199                                 case RES_EQ_RESERVED:
4200                                         spin_lock_irq(mlx4_tlock(dev));
4201                                         rb_erase(&eq->com.node,
4202                                                  &tracker->res_tree[RES_EQ]);
4203                                         list_del(&eq->com.list);
4204                                         spin_unlock_irq(mlx4_tlock(dev));
4205                                         kfree(eq);
4206                                         state = 0;
4207                                         break;
4208
4209                                 case RES_EQ_HW:
4210                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
4211                                         if (IS_ERR(mailbox)) {
4212                                                 cond_resched();
4213                                                 continue;
4214                                         }
4215                                         err = mlx4_cmd_box(dev, slave, 0,
4216                                                            eqn & 0xff, 0,
4217                                                            MLX4_CMD_HW2SW_EQ,
4218                                                            MLX4_CMD_TIME_CLASS_A,
4219                                                            MLX4_CMD_NATIVE);
4220                                         if (err)
4221                                                 mlx4_dbg(dev, "rem_slave_eqs: failed"
4222                                                          " to move slave %d eqs %d to"
4223                                                          " SW ownership\n", slave, eqn);
4224                                         mlx4_free_cmd_mailbox(dev, mailbox);
4225                                         atomic_dec(&eq->mtt->ref_count);
4226                                         state = RES_EQ_RESERVED;
4227                                         break;
4228
4229                                 default:
4230                                         state = 0;
4231                                 }
4232                         }
4233                 }
4234                 spin_lock_irq(mlx4_tlock(dev));
4235         }
4236         spin_unlock_irq(mlx4_tlock(dev));
4237 }
4238
4239 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4240 {
4241         struct mlx4_priv *priv = mlx4_priv(dev);
4242         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4243         struct list_head *counter_list =
4244                 &tracker->slave_list[slave].res_list[RES_COUNTER];
4245         struct res_counter *counter;
4246         struct res_counter *tmp;
4247         int err;
4248         int index;
4249
4250         err = move_all_busy(dev, slave, RES_COUNTER);
4251         if (err)
4252                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
4253                           "busy for slave %d\n", slave);
4254
4255         spin_lock_irq(mlx4_tlock(dev));
4256         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4257                 if (counter->com.owner == slave) {
4258                         index = counter->com.res_id;
4259                         rb_erase(&counter->com.node,
4260                                  &tracker->res_tree[RES_COUNTER]);
4261                         list_del(&counter->com.list);
4262                         kfree(counter);
4263                         __mlx4_counter_free(dev, index);
4264                 }
4265         }
4266         spin_unlock_irq(mlx4_tlock(dev));
4267 }
4268
4269 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4270 {
4271         struct mlx4_priv *priv = mlx4_priv(dev);
4272         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4273         struct list_head *xrcdn_list =
4274                 &tracker->slave_list[slave].res_list[RES_XRCD];
4275         struct res_xrcdn *xrcd;
4276         struct res_xrcdn *tmp;
4277         int err;
4278         int xrcdn;
4279
4280         err = move_all_busy(dev, slave, RES_XRCD);
4281         if (err)
4282                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
4283                           "busy for slave %d\n", slave);
4284
4285         spin_lock_irq(mlx4_tlock(dev));
4286         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4287                 if (xrcd->com.owner == slave) {
4288                         xrcdn = xrcd->com.res_id;
4289                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4290                         list_del(&xrcd->com.list);
4291                         kfree(xrcd);
4292                         __mlx4_xrcd_free(dev, xrcdn);
4293                 }
4294         }
4295         spin_unlock_irq(mlx4_tlock(dev));
4296 }
4297
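/*
 * Release everything a slave still owns, typically after the VF has
 * been reset or shut down.  Runs under the slave's resource-tracker
 * mutex; QPs are cleaned early (detaching their multicast groups) so
 * that the CQ, SRQ, MR and MTT reference counts they hold have been
 * dropped by the time those resources are freed.
 */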
4298 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4299 {
4300         struct mlx4_priv *priv = mlx4_priv(dev);
4301
4302         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4303         rem_slave_macs(dev, slave);
4304         rem_slave_vlans(dev, slave);
4305         rem_slave_qps(dev, slave);
4306         rem_slave_srqs(dev, slave);
4307         rem_slave_cqs(dev, slave);
4308         rem_slave_mrs(dev, slave);
4309         rem_slave_eqs(dev, slave);
4310         rem_slave_mtts(dev, slave);
4311         rem_slave_counters(dev, slave);
4312         rem_slave_xrcdns(dev, slave);
4313         rem_slave_fs_rule(dev, slave);
4314         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4315 }