FreeBSD/FreeBSD.git: sys/dev/mlx4/mlx4_core/mlx4_cmd.c (MFV r362565)
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
40 #include <linux/delay.h>
41
42 #include <dev/mlx4/cmd.h>
43 #include <dev/mlx4/device.h>
44 #include <linux/semaphore.h>
45 #include <rdma/ib_smi.h>
46
47 #include <asm/io.h>
48 #include <linux/ktime.h>
49
50 #include "mlx4.h"
51 #include "fw.h"
52 #include "fw_qos.h"
53
54 #define CMD_POLL_TOKEN 0xffff
55 #define INBOX_MASK      0xffffffffffffff00ULL
56
57 #define CMD_CHAN_VER 1
58 #define CMD_CHAN_IF_REV 1
59
60 enum {
61         /* command completed successfully: */
62         CMD_STAT_OK             = 0x00,
63         /* Internal error (such as a bus error) occurred while processing command: */
64         CMD_STAT_INTERNAL_ERR   = 0x01,
65         /* Operation/command not supported or opcode modifier not supported: */
66         CMD_STAT_BAD_OP         = 0x02,
67         /* Parameter not supported or parameter out of range: */
68         CMD_STAT_BAD_PARAM      = 0x03,
69         /* System not enabled or bad system state: */
70         CMD_STAT_BAD_SYS_STATE  = 0x04,
71         /* Attempt to access a reserved or unallocated resource: */
72         CMD_STAT_BAD_RESOURCE   = 0x05,
73         /* Requested resource is currently executing a command, or is otherwise busy: */
74         CMD_STAT_RESOURCE_BUSY  = 0x06,
75         /* Required capability exceeds device limits: */
76         CMD_STAT_EXCEED_LIM     = 0x08,
77         /* Resource is not in the appropriate state or ownership: */
78         CMD_STAT_BAD_RES_STATE  = 0x09,
79         /* Index out of range: */
80         CMD_STAT_BAD_INDEX      = 0x0a,
81         /* FW image corrupted: */
82         CMD_STAT_BAD_NVMEM      = 0x0b,
83         /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
84         CMD_STAT_ICM_ERROR      = 0x0c,
85         /* Attempt to modify a QP/EE which is not in the presumed state: */
86         CMD_STAT_BAD_QP_STATE   = 0x10,
87         /* Bad segment parameters (Address/Size): */
88         CMD_STAT_BAD_SEG_PARAM  = 0x20,
89         /* Memory Region has Memory Windows bound to: */
90         CMD_STAT_REG_BOUND      = 0x21,
91         /* HCA local attached memory not present: */
92         CMD_STAT_LAM_NOT_PRE    = 0x22,
93         /* Bad management packet (silently discarded): */
94         CMD_STAT_BAD_PKT        = 0x30,
95         /* More outstanding CQEs in CQ than new CQ size: */
96         CMD_STAT_BAD_SIZE       = 0x40,
97         /* Multi Function device support required: */
98         CMD_STAT_MULTI_FUNC_REQ = 0x50,
99 };
100
101 enum {
102         HCR_IN_PARAM_OFFSET     = 0x00,
103         HCR_IN_MODIFIER_OFFSET  = 0x08,
104         HCR_OUT_PARAM_OFFSET    = 0x0c,
105         HCR_TOKEN_OFFSET        = 0x14,
106         HCR_STATUS_OFFSET       = 0x18,
107
108         HCR_OPMOD_SHIFT         = 12,
109         HCR_T_BIT               = 21,
110         HCR_E_BIT               = 22,
111         HCR_GO_BIT              = 23
112 };
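/*
 * Note on the HCR layout (as used by mlx4_cmd_post() below): the offsets
 * above are byte offsets into the HCR.  The first six 32-bit words carry
 * in_param (0x00), in_modifier (0x08), out_param (0x0c) and the command
 * token (0x14); the word at HCR_STATUS_OFFSET (0x18) carries the opcode,
 * the opcode modifier (HCR_OPMOD_SHIFT), the toggle, event and go bits,
 * and, on completion, the firmware status in its top byte (see
 * mlx4_cmd_poll()).
 */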
113
114 enum {
115         GO_BIT_TIMEOUT_MSECS    = 10000
116 };
117
118 enum mlx4_vlan_transition {
119         MLX4_VLAN_TRANSITION_VST_VST = 0,
120         MLX4_VLAN_TRANSITION_VST_VGT = 1,
121         MLX4_VLAN_TRANSITION_VGT_VST = 2,
122         MLX4_VLAN_TRANSITION_VGT_VGT = 3,
123 };
124
125
126 struct mlx4_cmd_context {
127         struct completion       done;
128         int                     result;
129         int                     next;
130         u64                     out_param;
131         u16                     token;
132         u8                      fw_status;
133 };
134
135 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
136                                     struct mlx4_vhcr_cmd *in_vhcr);
137
138 static int mlx4_status_to_errno(u8 status)
139 {
140         static const int trans_table[] = {
141                 [CMD_STAT_INTERNAL_ERR]   = -EIO,
142                 [CMD_STAT_BAD_OP]         = -EPERM,
143                 [CMD_STAT_BAD_PARAM]      = -EINVAL,
144                 [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
145                 [CMD_STAT_BAD_RESOURCE]   = -EBADF,
146                 [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
147                 [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
148                 [CMD_STAT_BAD_RES_STATE]  = -EBADF,
149                 [CMD_STAT_BAD_INDEX]      = -EBADF,
150                 [CMD_STAT_BAD_NVMEM]      = -EFAULT,
151                 [CMD_STAT_ICM_ERROR]      = -ENFILE,
152                 [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
153                 [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
154                 [CMD_STAT_REG_BOUND]      = -EBUSY,
155                 [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
156                 [CMD_STAT_BAD_PKT]        = -EINVAL,
157                 [CMD_STAT_BAD_SIZE]       = -ENOMEM,
158                 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
159         };
160
161         if (status >= ARRAY_SIZE(trans_table) ||
162             (status != CMD_STAT_OK && trans_table[status] == 0))
163                 return -EIO;
164
165         return trans_table[status];
166 }
167
168 static u8 mlx4_errno_to_status(int errno)
169 {
170         switch (errno) {
171         case -EPERM:
172                 return CMD_STAT_BAD_OP;
173         case -EINVAL:
174                 return CMD_STAT_BAD_PARAM;
175         case -ENXIO:
176                 return CMD_STAT_BAD_SYS_STATE;
177         case -EBUSY:
178                 return CMD_STAT_RESOURCE_BUSY;
179         case -ENOMEM:
180                 return CMD_STAT_EXCEED_LIM;
181         case -ENFILE:
182                 return CMD_STAT_ICM_ERROR;
183         default:
184                 return CMD_STAT_INTERNAL_ERR;
185         }
186 }
187
188 static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
189                                        u8 op_modifier)
190 {
191         switch (op) {
192         case MLX4_CMD_UNMAP_ICM:
193         case MLX4_CMD_UNMAP_ICM_AUX:
194         case MLX4_CMD_UNMAP_FA:
195         case MLX4_CMD_2RST_QP:
196         case MLX4_CMD_HW2SW_EQ:
197         case MLX4_CMD_HW2SW_CQ:
198         case MLX4_CMD_HW2SW_SRQ:
199         case MLX4_CMD_HW2SW_MPT:
200         case MLX4_CMD_CLOSE_HCA:
201         case MLX4_QP_FLOW_STEERING_DETACH:
202         case MLX4_CMD_FREE_RES:
203         case MLX4_CMD_CLOSE_PORT:
204                 return CMD_STAT_OK;
205
206         case MLX4_CMD_QP_ATTACH:
207                 /* In the Detach case, return success */
208                 if (op_modifier == 0)
209                         return CMD_STAT_OK;
210                 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
211
212         default:
213                 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
214         }
215 }
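/*
 * When the device is in internal-error state, teardown-style commands
 * (unmaps, HW2SW transitions, CLOSE_*, FREE_RES, ...) are reported as
 * successful by mlx4_internal_err_ret_value() so that cleanup can make
 * progress; every other command gets the CMD_STAT_INTERNAL_ERR errno.
 */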
216
217 static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
218 {
219         /* Any error during the closing commands below is considered fatal */
220         if (op == MLX4_CMD_CLOSE_HCA ||
221             op == MLX4_CMD_HW2SW_EQ ||
222             op == MLX4_CMD_HW2SW_CQ ||
223             op == MLX4_CMD_2RST_QP ||
224             op == MLX4_CMD_HW2SW_SRQ ||
225             op == MLX4_CMD_SYNC_TPT ||
226             op == MLX4_CMD_UNMAP_ICM ||
227             op == MLX4_CMD_UNMAP_ICM_AUX ||
228             op == MLX4_CMD_UNMAP_FA)
229                 return 1;
230         /* An error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
231           * equals CMD_STAT_REG_BOUND.  This status indicates that the memory
232           * region has memory windows bound to it, which may result from
233           * invalid user space usage and is not fatal.
234           */
235         if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
236                 return 1;
237         return 0;
238 }
239
240 static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
241                                int err)
242 {
243         /* Only if the reset flow is really active is the return code based
244           * on the command; otherwise the current error code is returned.
245           */
246         if (mlx4_internal_err_reset) {
247                 mlx4_enter_error_state(dev->persist);
248                 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
249         }
250
251         return err;
252 }
253
254 static int comm_pending(struct mlx4_dev *dev)
255 {
256         struct mlx4_priv *priv = mlx4_priv(dev);
257         u32 status = readl(&priv->mfunc.comm->slave_read);
258
259         return (swab32(status) >> 31) != priv->cmd.comm_toggle;
260 }
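/*
 * Comm-channel handshake: mlx4_comm_cmd_post() flips comm_toggle and writes
 * it (bit 31) together with the command (bits 23:16) and the parameter
 * (bits 15:0) to the slave_write word.  comm_pending() reports the command
 * as outstanding until the toggle read back from slave_read matches
 * comm_toggle again.
 */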
261
262 static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
263 {
264         struct mlx4_priv *priv = mlx4_priv(dev);
265         u32 val;
266
267         /* To avoid writing to unknown addresses after the device state was
268          * changed to internal error and the function was reset,
269          * check the INTERNAL_ERROR flag which is updated under
270          * device_state_mutex lock.
271          */
272         mutex_lock(&dev->persist->device_state_mutex);
273
274         if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
275                 mutex_unlock(&dev->persist->device_state_mutex);
276                 return -EIO;
277         }
278
279         priv->cmd.comm_toggle ^= 1;
280         val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
281         __raw_writel((__force u32) cpu_to_be32(val),
282                      &priv->mfunc.comm->slave_write);
283         mmiowb();
284         mutex_unlock(&dev->persist->device_state_mutex);
285         return 0;
286 }
287
288 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
289                        unsigned long timeout)
290 {
291         struct mlx4_priv *priv = mlx4_priv(dev);
292         unsigned long end;
293         int err = 0;
294         int ret_from_pending = 0;
295
296         /* First, verify that the master reports correct status */
297         if (comm_pending(dev)) {
298                 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
299                           priv->cmd.comm_toggle, cmd);
300                 return -EAGAIN;
301         }
302
303         /* Write command */
304         down(&priv->cmd.poll_sem);
305         if (mlx4_comm_cmd_post(dev, cmd, param)) {
306                 /* Only in case the device state is INTERNAL_ERROR,
307                  * mlx4_comm_cmd_post returns with an error
308                  */
309                 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
310                 goto out;
311         }
312
313         end = msecs_to_jiffies(timeout) + jiffies;
314         while (comm_pending(dev) && time_before(jiffies, end))
315                 cond_resched();
316         ret_from_pending = comm_pending(dev);
317         if (ret_from_pending) {
318                 /* Check if the slave is trying to boot in the middle of the
319                  * FLR process.  The only non-zero result of the RESET command
320                  * is MLX4_DELAY_RESET_SLAVE. */
321                 if (cmd == MLX4_COMM_CMD_RESET) {
322                         err = MLX4_DELAY_RESET_SLAVE;
323                         goto out;
324                 } else {
325                         mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
326                                   cmd);
327                         err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
328                 }
329         }
330
331         if (err)
332                 mlx4_enter_error_state(dev->persist);
333 out:
334         up(&priv->cmd.poll_sem);
335         return err;
336 }
337
338 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
339                               u16 param, u16 op, unsigned long timeout)
340 {
341         struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
342         struct mlx4_cmd_context *context;
343         unsigned long end;
344         int err = 0;
345
346         down(&cmd->event_sem);
347
348         spin_lock(&cmd->context_lock);
349         BUG_ON(cmd->free_head < 0);
350         context = &cmd->context[cmd->free_head];
351         context->token += cmd->token_mask + 1;
352         cmd->free_head = context->next;
353         spin_unlock(&cmd->context_lock);
354
355         reinit_completion(&context->done);
356
357         if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
358                 /* Only in case the device state is INTERNAL_ERROR,
359                  * mlx4_comm_cmd_post returns with an error
360                  */
361                 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
362                 goto out;
363         }
364
365         if (!wait_for_completion_timeout(&context->done,
366                                          msecs_to_jiffies(timeout))) {
367                 mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
368                           vhcr_cmd, op);
369                 goto out_reset;
370         }
371
372         err = context->result;
373         if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
374                 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
375                          vhcr_cmd, context->fw_status);
376                 if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
377                         goto out_reset;
378         }
379
380         /* Wait for the comm channel to become ready; this is necessary
381          * to prevent a race when switching between event and polling
382          * mode.
383          * This section is skipped when the device is in the FATAL_ERROR
384          * state: in that state, no commands are sent via the comm channel
385          * until the device has returned from reset.
386          */
387         if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
388                 end = msecs_to_jiffies(timeout) + jiffies;
389                 while (comm_pending(dev) && time_before(jiffies, end))
390                         cond_resched();
391         }
392         goto out;
393
394 out_reset:
395         err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
396         mlx4_enter_error_state(dev->persist);
397 out:
398         spin_lock(&cmd->context_lock);
399         context->next = cmd->free_head;
400         cmd->free_head = context - cmd->context;
401         spin_unlock(&cmd->context_lock);
402
403         up(&cmd->event_sem);
404         return err;
405 }
406
407 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
408                   u16 op, unsigned long timeout)
409 {
410         if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
411                 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
412
413         if (mlx4_priv(dev)->cmd.use_events)
414                 return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
415         return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
416 }
417
418 static int cmd_pending(struct mlx4_dev *dev)
419 {
420         u32 status;
421
422         if (pci_channel_offline(dev->persist->pdev))
423                 return -EIO;
424
425         status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
426
427         return (status & swab32(1 << HCR_GO_BIT)) ||
428                 (mlx4_priv(dev)->cmd.toggle ==
429                  !!(status & swab32(1 << HCR_T_BIT)));
430 }
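/*
 * cmd_pending() returns nonzero while the previous command still owns the
 * HCR, judged from the go bit and the toggle bit.  mlx4_cmd_post() waits
 * for it to clear (for up to GO_BIT_TIMEOUT_MSECS when posting in event
 * mode) before writing a new command.
 */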
431
432 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
433                          u32 in_modifier, u8 op_modifier, u16 op, u16 token,
434                          int event)
435 {
436         struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
437         u32 __iomem *hcr = cmd->hcr;
438         int ret = -EIO;
439         unsigned long end;
440
441         mutex_lock(&dev->persist->device_state_mutex);
442         /* To avoid writing to unknown addresses after the device state was
443           * changed to internal error and the chip was reset,
444           * check the INTERNAL_ERROR flag which is updated under
445           * device_state_mutex lock.
446           */
447         if (pci_channel_offline(dev->persist->pdev) ||
448             (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
449                 /*
450                  * Device is going through error recovery
451                  * and cannot accept commands.
452                  */
453                 goto out;
454         }
455
456         end = jiffies;
457         if (event)
458                 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
459
460         while (cmd_pending(dev)) {
461                 if (pci_channel_offline(dev->persist->pdev)) {
462                         /*
463                          * Device is going through error recovery
464                          * and cannot accept commands.
465                          */
466                         goto out;
467                 }
468
469                 if (time_after_eq(jiffies, end)) {
470                         mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
471                         goto out;
472                 }
473                 cond_resched();
474         }
475
476         /*
477          * We use writel (instead of something like memcpy_toio)
478          * because writes of less than 32 bits to the HCR don't work
479          * (and some architectures such as ia64 implement memcpy_toio
480          * in terms of writeb).
481          */
482         __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
483         __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
484         __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
485         __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
486         __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
487         __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
488
489         /* __raw_writel may not order writes. */
490         wmb();
491
492         __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
493                                                (cmd->toggle << HCR_T_BIT)       |
494                                                (event ? (1 << HCR_E_BIT) : 0)   |
495                                                (op_modifier << HCR_OPMOD_SHIFT) |
496                                                op), hcr + 6);
497
498         /*
499          * Make sure that our HCR writes don't get mixed in with
500          * writes from another CPU starting a FW command.
501          */
502         mmiowb();
503
504         cmd->toggle = cmd->toggle ^ 1;
505
506         ret = 0;
507
508 out:
509         if (ret)
510                 mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
511                           op, ret, (long long)in_param, in_modifier, op_modifier);
512         mutex_unlock(&dev->persist->device_state_mutex);
513
514         return ret;
515 }
516
517 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
518                           int out_is_imm, u32 in_modifier, u8 op_modifier,
519                           u16 op, unsigned long timeout)
520 {
521         struct mlx4_priv *priv = mlx4_priv(dev);
522         struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
523         int ret;
524
525         mutex_lock(&priv->cmd.slave_cmd_mutex);
526
527         vhcr->in_param = cpu_to_be64(in_param);
528         vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
529         vhcr->in_modifier = cpu_to_be32(in_modifier);
530         vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
531         vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
532         vhcr->status = 0;
533         vhcr->flags = !!(priv->cmd.use_events) << 6;
534
535         if (mlx4_is_master(dev)) {
536                 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
537                 if (!ret) {
538                         if (out_is_imm) {
539                                 if (out_param)
540                                         *out_param =
541                                                 be64_to_cpu(vhcr->out_param);
542                                 else {
543                                         mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
544                                                  op);
545                                         vhcr->status = CMD_STAT_BAD_PARAM;
546                                 }
547                         }
548                         ret = mlx4_status_to_errno(vhcr->status);
549                 }
550                 if (ret &&
551                     dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
552                         ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
553         } else {
554                 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
555                                     MLX4_COMM_TIME + timeout);
556                 if (!ret) {
557                         if (out_is_imm) {
558                                 if (out_param)
559                                         *out_param =
560                                                 be64_to_cpu(vhcr->out_param);
561                                 else {
562                                         mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
563                                                  op);
564                                         vhcr->status = CMD_STAT_BAD_PARAM;
565                                 }
566                         }
567                         ret = mlx4_status_to_errno(vhcr->status);
568                 } else {
569                         if (dev->persist->state &
570                             MLX4_DEVICE_STATE_INTERNAL_ERROR)
571                                 ret = mlx4_internal_err_ret_value(dev, op,
572                                                                   op_modifier);
573                         else
574                                 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
575                 }
576         }
577
578         mutex_unlock(&priv->cmd.slave_cmd_mutex);
579         return ret;
580 }
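/*
 * VHCR path: a slave function does not touch the HCR directly.  Instead,
 * mlx4_slave_cmd() fills the shared VHCR page (priv->mfunc.vhcr) with the
 * command parameters and either processes it locally via
 * mlx4_master_process_vhcr() (when this function is the master) or posts
 * MLX4_COMM_CMD_VHCR_POST on the comm channel so the master executes the
 * command on its behalf.
 */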
581
582 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
583                          int out_is_imm, u32 in_modifier, u8 op_modifier,
584                          u16 op, unsigned long timeout)
585 {
586         struct mlx4_priv *priv = mlx4_priv(dev);
587         void __iomem *hcr = priv->cmd.hcr;
588         int err = 0;
589         unsigned long end;
590         u32 stat;
591
592         down(&priv->cmd.poll_sem);
593
594         if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
595                 /*
596                  * Device is going through error recovery
597                  * and cannot accept commands.
598                  */
599                 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
600                 goto out;
601         }
602
603         if (out_is_imm && !out_param) {
604                 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
605                          op);
606                 err = -EINVAL;
607                 goto out;
608         }
609
610         err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
611                             in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
612         if (err)
613                 goto out_reset;
614
615         end = msecs_to_jiffies(timeout) + jiffies;
616         while (cmd_pending(dev) && time_before(jiffies, end)) {
617                 if (pci_channel_offline(dev->persist->pdev)) {
618                         /*
619                          * Device is going through error recovery
620                          * and cannot accept commands.
621                          */
622                         err = -EIO;
623                         goto out_reset;
624                 }
625
626                 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
627                         err = mlx4_internal_err_ret_value(dev, op, op_modifier);
628                         goto out;
629                 }
630
631                 cond_resched();
632         }
633
634         if (cmd_pending(dev)) {
635                 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
636                           op);
637                 err = -EIO;
638                 goto out_reset;
639         }
640
641         if (out_is_imm)
642                 *out_param =
643                         (u64) be32_to_cpu((__force __be32)
644                                           __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
645                         (u64) be32_to_cpu((__force __be32)
646                                           __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
647         stat = be32_to_cpu((__force __be32)
648                            __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
649         err = mlx4_status_to_errno(stat);
650         if (err) {
651                 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
652                          op, stat);
653                 if (mlx4_closing_cmd_fatal_error(op, stat))
654                         goto out_reset;
655                 goto out;
656         }
657
658 out_reset:
659         if (err)
660                 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
661 out:
662         up(&priv->cmd.poll_sem);
663         return err;
664 }
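/*
 * Polling mode: mlx4_cmd_poll() busy-waits (with cond_resched()) for the go
 * bit to clear and then reads the immediate out_param and the fw status
 * straight from the HCR.  Event mode (mlx4_cmd_wait() below) instead waits
 * on a completion that mlx4_cmd_event() signals; __mlx4_cmd() picks between
 * the two based on priv->cmd.use_events.
 */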
665
666 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
667 {
668         struct mlx4_priv *priv = mlx4_priv(dev);
669         struct mlx4_cmd_context *context =
670                 &priv->cmd.context[token & priv->cmd.token_mask];
671
672         /* previously timed out command completing at long last */
673         if (token != context->token)
674                 return;
675
676         context->fw_status = status;
677         context->result    = mlx4_status_to_errno(status);
678         context->out_param = out_param;
679
680         complete(&context->done);
681 }
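/*
 * mlx4_cmd_event() is the completion side of event mode: the token carried
 * in the completion is matched against the context's token (which is bumped
 * by token_mask + 1 every time the context is reused), so a command that
 * already timed out and released its context cannot complete a later
 * command by mistake.
 */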
682
683 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
684                          int out_is_imm, u32 in_modifier, u8 op_modifier,
685                          u16 op, unsigned long timeout)
686 {
687         struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688         struct mlx4_cmd_context *context;
689         long ret_wait;
690         int err = 0;
691
692         down(&cmd->event_sem);
693
694         spin_lock(&cmd->context_lock);
695         BUG_ON(cmd->free_head < 0);
696         context = &cmd->context[cmd->free_head];
697         context->token += cmd->token_mask + 1;
698         cmd->free_head = context->next;
699         spin_unlock(&cmd->context_lock);
700
701         if (out_is_imm && !out_param) {
702                 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
703                          op);
704                 err = -EINVAL;
705                 goto out;
706         }
707
708         reinit_completion(&context->done);
709
710         err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
711                             in_modifier, op_modifier, op, context->token, 1);
712         if (err)
713                 goto out_reset;
714
715         if (op == MLX4_CMD_SENSE_PORT) {
716                 ret_wait =
717                         wait_for_completion_interruptible_timeout(&context->done,
718                                                                   msecs_to_jiffies(timeout));
719                 if (ret_wait < 0) {
720                         context->fw_status = 0;
721                         context->out_param = 0;
722                         context->result = 0;
723                 }
724         } else {
725                 ret_wait = (long)wait_for_completion_timeout(&context->done,
726                                                              msecs_to_jiffies(timeout));
727         }
728         if (!ret_wait) {
729                 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
730                           op);
731                 if (op == MLX4_CMD_NOP) {
732                         err = -EBUSY;
733                         goto out;
734                 } else {
735                         err = -EIO;
736                         goto out_reset;
737                 }
738         }
739
740         err = context->result;
741         if (err) {
742                 /* Since we do not want this error message to always be
743                  * displayed at driver start when there are ConnectX2 HCAs
744                  * on the host, we demote the message for this specific
745                  * command/input_mod/opcode_mod/fw-status to debug level.
746                  */
747                 if (op == MLX4_CMD_SET_PORT &&
748                     (in_modifier == 1 || in_modifier == 2) &&
749                     op_modifier == MLX4_SET_PORT_IB_OPCODE &&
750                     context->fw_status == CMD_STAT_BAD_SIZE)
751                         mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
752                                  op, context->fw_status);
753                 else
754                         mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
755                                  op, context->fw_status);
756                 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
757                         err = mlx4_internal_err_ret_value(dev, op, op_modifier);
758                 else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
759                         goto out_reset;
760
761                 goto out;
762         }
763
764         if (out_is_imm)
765                 *out_param = context->out_param;
766
767 out_reset:
768         if (err)
769                 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
770 out:
771         spin_lock(&cmd->context_lock);
772         context->next = cmd->free_head;
773         cmd->free_head = context - cmd->context;
774         spin_unlock(&cmd->context_lock);
775
776         up(&cmd->event_sem);
777         return err;
778 }
779
780 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
781                int out_is_imm, u32 in_modifier, u8 op_modifier,
782                u16 op, unsigned long timeout, int native)
783 {
784         if (pci_channel_offline(dev->persist->pdev))
785                 return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
786
787         if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
788                 int ret;
789
790                 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
791                         return mlx4_internal_err_ret_value(dev, op,
792                                                           op_modifier);
793                 down_read(&mlx4_priv(dev)->cmd.switch_sem);
794                 if (mlx4_priv(dev)->cmd.use_events)
795                         ret = mlx4_cmd_wait(dev, in_param, out_param,
796                                             out_is_imm, in_modifier,
797                                             op_modifier, op, timeout);
798                 else
799                         ret = mlx4_cmd_poll(dev, in_param, out_param,
800                                             out_is_imm, in_modifier,
801                                             op_modifier, op, timeout);
802
803                 up_read(&mlx4_priv(dev)->cmd.switch_sem);
804                 return ret;
805         }
806         return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
807                               in_modifier, op_modifier, op, timeout);
808 }
809 EXPORT_SYMBOL_GPL(__mlx4_cmd);
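/*
 * Callers normally reach __mlx4_cmd() through the mlx4_cmd(), mlx4_cmd_box()
 * and mlx4_cmd_imm() wrappers (declared in <dev/mlx4/cmd.h>, included above);
 * mlx4_ARM_COMM_CHANNEL() below is a minimal in-file example.  Illustrative
 * sketch only (not part of this file): a firmware NOP could be issued as
 *
 *	err = mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP,
 *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 */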
810
811
812 int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
813 {
814         return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
815                         MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
816 }
817
818 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
819                            int slave, u64 slave_addr,
820                            int size, int is_read)
821 {
822         u64 in_param;
823         u64 out_param;
824
825         if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
826             (slave & ~0x7f) | (size & 0xff)) {
827                 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
828                               "master_addr:0x%llx slave_id:%d size:%d\n",
829                               (unsigned long long)slave_addr,
830                               (unsigned long long)master_addr, slave, size);
831                 return -EINVAL;
832         }
833
834         if (is_read) {
835                 in_param = (u64) slave | slave_addr;
836                 out_param = (u64) dev->caps.function | master_addr;
837         } else {
838                 in_param = (u64) dev->caps.function | master_addr;
839                 out_param = (u64) slave | slave_addr;
840         }
841
842         return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
843                             MLX4_CMD_ACCESS_MEM,
844                             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
845 }
846
847 static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
848                                struct mlx4_cmd_mailbox *inbox,
849                                struct mlx4_cmd_mailbox *outbox)
850 {
851         struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
852         struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
853         int err;
854         int i;
855
856         if (index & 0x1f)
857                 return -EINVAL;
858
859         in_mad->attr_mod = cpu_to_be32(index / 32);
860
861         err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
862                            MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
863                            MLX4_CMD_NATIVE);
864         if (err)
865                 return err;
866
867         for (i = 0; i < 32; ++i)
868                 pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
869
870         return err;
871 }
872
873 static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
874                                struct mlx4_cmd_mailbox *inbox,
875                                struct mlx4_cmd_mailbox *outbox)
876 {
877         int i;
878         int err;
879
880         for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
881                 err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
882                 if (err)
883                         return err;
884         }
885
886         return 0;
887 }
888 #define PORT_CAPABILITY_LOCATION_IN_SMP 20
889 #define PORT_STATE_OFFSET 32
890
891 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
892 {
893         if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
894                 return IB_PORT_ACTIVE;
895         else
896                 return IB_PORT_DOWN;
897 }
898
899 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
900                                 struct mlx4_vhcr *vhcr,
901                                 struct mlx4_cmd_mailbox *inbox,
902                                 struct mlx4_cmd_mailbox *outbox,
903                                 struct mlx4_cmd_info *cmd)
904 {
905         struct ib_smp *smp = inbox->buf;
906         u32 index;
907         u8 port, slave_port;
908         u8 opcode_modifier;
909         u16 *table;
910         int err;
911         int vidx, pidx;
912         int network_view;
913         struct mlx4_priv *priv = mlx4_priv(dev);
914         struct ib_smp *outsmp = outbox->buf;
915         __be16 *outtab = (__be16 *)(outsmp->data);
916         __be32 slave_cap_mask;
917         __be64 slave_node_guid;
918
919         slave_port = vhcr->in_modifier;
920         port = mlx4_slave_convert_port(dev, slave, slave_port);
921
922         /* network-view bit is for driver use only, and should not be passed to FW */
923         opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
924         network_view = !!(vhcr->op_modifier & 0x8);
925
926         if (smp->base_version == 1 &&
927             smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
928             smp->class_version == 1) {
929                 /* host view is paravirtualized */
930                 if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
931                         if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
932                                 index = be32_to_cpu(smp->attr_mod);
933                                 if (port < 1 || port > dev->caps.num_ports)
934                                         return -EINVAL;
935                                 table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
936                                                 sizeof(*table) * 32, GFP_KERNEL);
937
938                                 if (!table)
939                                         return -ENOMEM;
940                                 /* need to get the full pkey table because the paravirtualized
941                                  * pkeys may be scattered among several pkey blocks.
942                                  */
943                                 err = get_full_pkey_table(dev, port, table, inbox, outbox);
944                                 if (!err) {
945                                         for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
946                                                 pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
947                                                 outtab[vidx % 32] = cpu_to_be16(table[pidx]);
948                                         }
949                                 }
950                                 kfree(table);
951                                 return err;
952                         }
953                         if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
954                                 /* get the slave-specific caps: */
955                                 /* do the command */
956                                 smp->attr_mod = cpu_to_be32(port);
957                                 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
958                                             port, opcode_modifier,
959                                             vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
960                                 /* modify the response for slaves */
961                                 if (!err && slave != mlx4_master_func_num(dev)) {
962                                         u8 *state = outsmp->data + PORT_STATE_OFFSET;
963
964                                         if (port < 1 || port > dev->caps.num_ports)
965                                                 return -EINVAL;
966                                         *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
967                                         slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
968                                         memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
969                                 }
970                                 return err;
971                         }
972                         if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
973                                 __be64 guid;
974
975                                 if (port < 1 || port > dev->caps.num_ports)
976                                         return -EINVAL;
977
978                                 guid = mlx4_get_admin_guid(dev, slave, port);
979
980                                 /* set the PF admin guid to the FW/HW burned
981                                  * GUID, if it wasn't yet set
982                                  */
983                                 if (slave == 0 && guid == 0) {
984                                         smp->attr_mod = 0;
985                                         err = mlx4_cmd_box(dev,
986                                                            inbox->dma,
987                                                            outbox->dma,
988                                                            vhcr->in_modifier,
989                                                            opcode_modifier,
990                                                            vhcr->op,
991                                                            MLX4_CMD_TIME_CLASS_C,
992                                                            MLX4_CMD_NATIVE);
993                                         if (err)
994                                                 return err;
995                                         mlx4_set_admin_guid(dev,
996                                                             *(__be64 *)outsmp->
997                                                             data, slave, port);
998                                 } else {
999                                         memcpy(outsmp->data, &guid, 8);
1000                                 }
1001
1002                                 /* clean all other gids */
1003                                 memset(outsmp->data + 8, 0, 56);
1004                                 return 0;
1005                         }
1006                         if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
1007                                 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1008                                              port, opcode_modifier,
1009                                              vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1010                                 if (!err) {
1011                                         slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
1012                                         memcpy(outsmp->data + 12, &slave_node_guid, 8);
1013                                 }
1014                                 return err;
1015                         }
1016                 }
1017         }
1018
1019         /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
1020          * These are the MADs used by ib verbs (such as ib_query_gids).
1021          */
1022         if (slave != mlx4_master_func_num(dev) &&
1023             !mlx4_vf_smi_enabled(dev, slave, port)) {
1024                 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
1025                       smp->method == IB_MGMT_METHOD_GET) || network_view) {
1026                         mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
1027                                  slave, smp->mgmt_class, smp->method,
1028                                  network_view ? "Network" : "Host",
1029                                  be16_to_cpu(smp->attr_id));
1030                         return -EPERM;
1031                 }
1032         }
1033
1034         return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1035                                     vhcr->in_modifier, opcode_modifier,
1036                                     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1037 }
1038
1039 static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1040                      struct mlx4_vhcr *vhcr,
1041                      struct mlx4_cmd_mailbox *inbox,
1042                      struct mlx4_cmd_mailbox *outbox,
1043                      struct mlx4_cmd_info *cmd)
1044 {
1045         return -EPERM;
1046 }
1047
1048 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1049                      struct mlx4_vhcr *vhcr,
1050                      struct mlx4_cmd_mailbox *inbox,
1051                      struct mlx4_cmd_mailbox *outbox,
1052                      struct mlx4_cmd_info *cmd)
1053 {
1054         u64 in_param;
1055         u64 out_param;
1056         int err;
1057
1058         in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1059         out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1060         if (cmd->encode_slave_id) {
1061                 in_param &= 0xffffffffffffff00ll;
1062                 in_param |= slave;
1063         }
1064
1065         err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1066                          vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1067                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1068
1069         if (cmd->out_is_imm)
1070                 vhcr->out_param = out_param;
1071
1072         return err;
1073 }
1074
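/*
 * cmd_info[]: per-opcode dispatch table used on the master when a command
 * arrives from a slave through the VHCR (see mlx4_master_process_vhcr()).
 * Each entry records whether the command carries an inbox and/or outbox
 * mailbox, whether its result is an immediate out_param, whether the slave
 * id must be encoded into the input, and which wrapper (if any) is used to
 * virtualize or restrict the command before it reaches the firmware.
 */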
1075 static struct mlx4_cmd_info cmd_info[] = {
1076         {
1077                 .opcode = MLX4_CMD_QUERY_FW,
1078                 .has_inbox = false,
1079                 .has_outbox = true,
1080                 .out_is_imm = false,
1081                 .encode_slave_id = false,
1082                 .verify = NULL,
1083                 .wrapper = mlx4_QUERY_FW_wrapper
1084         },
1085         {
1086                 .opcode = MLX4_CMD_QUERY_HCA,
1087                 .has_inbox = false,
1088                 .has_outbox = true,
1089                 .out_is_imm = false,
1090                 .encode_slave_id = false,
1091                 .verify = NULL,
1092                 .wrapper = NULL
1093         },
1094         {
1095                 .opcode = MLX4_CMD_QUERY_DEV_CAP,
1096                 .has_inbox = false,
1097                 .has_outbox = true,
1098                 .out_is_imm = false,
1099                 .encode_slave_id = false,
1100                 .verify = NULL,
1101                 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
1102         },
1103         {
1104                 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1105                 .has_inbox = false,
1106                 .has_outbox = true,
1107                 .out_is_imm = false,
1108                 .encode_slave_id = false,
1109                 .verify = NULL,
1110                 .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1111         },
1112         {
1113                 .opcode = MLX4_CMD_QUERY_ADAPTER,
1114                 .has_inbox = false,
1115                 .has_outbox = true,
1116                 .out_is_imm = false,
1117                 .encode_slave_id = false,
1118                 .verify = NULL,
1119                 .wrapper = NULL
1120         },
1121         {
1122                 .opcode = MLX4_CMD_INIT_PORT,
1123                 .has_inbox = false,
1124                 .has_outbox = false,
1125                 .out_is_imm = false,
1126                 .encode_slave_id = false,
1127                 .verify = NULL,
1128                 .wrapper = mlx4_INIT_PORT_wrapper
1129         },
1130         {
1131                 .opcode = MLX4_CMD_CLOSE_PORT,
1132                 .has_inbox = false,
1133                 .has_outbox = false,
1134                 .out_is_imm  = false,
1135                 .encode_slave_id = false,
1136                 .verify = NULL,
1137                 .wrapper = mlx4_CLOSE_PORT_wrapper
1138         },
1139         {
1140                 .opcode = MLX4_CMD_QUERY_PORT,
1141                 .has_inbox = false,
1142                 .has_outbox = true,
1143                 .out_is_imm = false,
1144                 .encode_slave_id = false,
1145                 .verify = NULL,
1146                 .wrapper = mlx4_QUERY_PORT_wrapper
1147         },
1148         {
1149                 .opcode = MLX4_CMD_SET_PORT,
1150                 .has_inbox = true,
1151                 .has_outbox = false,
1152                 .out_is_imm = false,
1153                 .encode_slave_id = false,
1154                 .verify = NULL,
1155                 .wrapper = mlx4_SET_PORT_wrapper
1156         },
1157         {
1158                 .opcode = MLX4_CMD_MAP_EQ,
1159                 .has_inbox = false,
1160                 .has_outbox = false,
1161                 .out_is_imm = false,
1162                 .encode_slave_id = false,
1163                 .verify = NULL,
1164                 .wrapper = mlx4_MAP_EQ_wrapper
1165         },
1166         {
1167                 .opcode = MLX4_CMD_SW2HW_EQ,
1168                 .has_inbox = true,
1169                 .has_outbox = false,
1170                 .out_is_imm = false,
1171                 .encode_slave_id = true,
1172                 .verify = NULL,
1173                 .wrapper = mlx4_SW2HW_EQ_wrapper
1174         },
1175         {
1176                 .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1177                 .has_inbox = false,
1178                 .has_outbox = false,
1179                 .out_is_imm = false,
1180                 .encode_slave_id = false,
1181                 .verify = NULL,
1182                 .wrapper = NULL
1183         },
1184         {
1185                 .opcode = MLX4_CMD_NOP,
1186                 .has_inbox = false,
1187                 .has_outbox = false,
1188                 .out_is_imm = false,
1189                 .encode_slave_id = false,
1190                 .verify = NULL,
1191                 .wrapper = NULL
1192         },
1193         {
1194                 .opcode = MLX4_CMD_CONFIG_DEV,
1195                 .has_inbox = false,
1196                 .has_outbox = true,
1197                 .out_is_imm = false,
1198                 .encode_slave_id = false,
1199                 .verify = NULL,
1200                 .wrapper = mlx4_CONFIG_DEV_wrapper
1201         },
1202         {
1203                 .opcode = MLX4_CMD_ALLOC_RES,
1204                 .has_inbox = false,
1205                 .has_outbox = false,
1206                 .out_is_imm = true,
1207                 .encode_slave_id = false,
1208                 .verify = NULL,
1209                 .wrapper = mlx4_ALLOC_RES_wrapper
1210         },
1211         {
1212                 .opcode = MLX4_CMD_FREE_RES,
1213                 .has_inbox = false,
1214                 .has_outbox = false,
1215                 .out_is_imm = false,
1216                 .encode_slave_id = false,
1217                 .verify = NULL,
1218                 .wrapper = mlx4_FREE_RES_wrapper
1219         },
1220         {
1221                 .opcode = MLX4_CMD_SW2HW_MPT,
1222                 .has_inbox = true,
1223                 .has_outbox = false,
1224                 .out_is_imm = false,
1225                 .encode_slave_id = true,
1226                 .verify = NULL,
1227                 .wrapper = mlx4_SW2HW_MPT_wrapper
1228         },
1229         {
1230                 .opcode = MLX4_CMD_QUERY_MPT,
1231                 .has_inbox = false,
1232                 .has_outbox = true,
1233                 .out_is_imm = false,
1234                 .encode_slave_id = false,
1235                 .verify = NULL,
1236                 .wrapper = mlx4_QUERY_MPT_wrapper
1237         },
1238         {
1239                 .opcode = MLX4_CMD_HW2SW_MPT,
1240                 .has_inbox = false,
1241                 .has_outbox = false,
1242                 .out_is_imm = false,
1243                 .encode_slave_id = false,
1244                 .verify = NULL,
1245                 .wrapper = mlx4_HW2SW_MPT_wrapper
1246         },
1247         {
1248                 .opcode = MLX4_CMD_READ_MTT,
1249                 .has_inbox = false,
1250                 .has_outbox = true,
1251                 .out_is_imm = false,
1252                 .encode_slave_id = false,
1253                 .verify = NULL,
1254                 .wrapper = NULL
1255         },
1256         {
1257                 .opcode = MLX4_CMD_WRITE_MTT,
1258                 .has_inbox = true,
1259                 .has_outbox = false,
1260                 .out_is_imm = false,
1261                 .encode_slave_id = false,
1262                 .verify = NULL,
1263                 .wrapper = mlx4_WRITE_MTT_wrapper
1264         },
1265         {
1266                 .opcode = MLX4_CMD_SYNC_TPT,
1267                 .has_inbox = true,
1268                 .has_outbox = false,
1269                 .out_is_imm = false,
1270                 .encode_slave_id = false,
1271                 .verify = NULL,
1272                 .wrapper = NULL
1273         },
1274         {
1275                 .opcode = MLX4_CMD_HW2SW_EQ,
1276                 .has_inbox = false,
1277                 .has_outbox = false,
1278                 .out_is_imm = false,
1279                 .encode_slave_id = true,
1280                 .verify = NULL,
1281                 .wrapper = mlx4_HW2SW_EQ_wrapper
1282         },
1283         {
1284                 .opcode = MLX4_CMD_QUERY_EQ,
1285                 .has_inbox = false,
1286                 .has_outbox = true,
1287                 .out_is_imm = false,
1288                 .encode_slave_id = true,
1289                 .verify = NULL,
1290                 .wrapper = mlx4_QUERY_EQ_wrapper
1291         },
1292         {
1293                 .opcode = MLX4_CMD_SW2HW_CQ,
1294                 .has_inbox = true,
1295                 .has_outbox = false,
1296                 .out_is_imm = false,
1297                 .encode_slave_id = true,
1298                 .verify = NULL,
1299                 .wrapper = mlx4_SW2HW_CQ_wrapper
1300         },
1301         {
1302                 .opcode = MLX4_CMD_HW2SW_CQ,
1303                 .has_inbox = false,
1304                 .has_outbox = false,
1305                 .out_is_imm = false,
1306                 .encode_slave_id = false,
1307                 .verify = NULL,
1308                 .wrapper = mlx4_HW2SW_CQ_wrapper
1309         },
1310         {
1311                 .opcode = MLX4_CMD_QUERY_CQ,
1312                 .has_inbox = false,
1313                 .has_outbox = true,
1314                 .out_is_imm = false,
1315                 .encode_slave_id = false,
1316                 .verify = NULL,
1317                 .wrapper = mlx4_QUERY_CQ_wrapper
1318         },
1319         {
1320                 .opcode = MLX4_CMD_MODIFY_CQ,
1321                 .has_inbox = true,
1322                 .has_outbox = false,
1323                 .out_is_imm = true,
1324                 .encode_slave_id = false,
1325                 .verify = NULL,
1326                 .wrapper = mlx4_MODIFY_CQ_wrapper
1327         },
1328         {
1329                 .opcode = MLX4_CMD_SW2HW_SRQ,
1330                 .has_inbox = true,
1331                 .has_outbox = false,
1332                 .out_is_imm = false,
1333                 .encode_slave_id = true,
1334                 .verify = NULL,
1335                 .wrapper = mlx4_SW2HW_SRQ_wrapper
1336         },
1337         {
1338                 .opcode = MLX4_CMD_HW2SW_SRQ,
1339                 .has_inbox = false,
1340                 .has_outbox = false,
1341                 .out_is_imm = false,
1342                 .encode_slave_id = false,
1343                 .verify = NULL,
1344                 .wrapper = mlx4_HW2SW_SRQ_wrapper
1345         },
1346         {
1347                 .opcode = MLX4_CMD_QUERY_SRQ,
1348                 .has_inbox = false,
1349                 .has_outbox = true,
1350                 .out_is_imm = false,
1351                 .encode_slave_id = false,
1352                 .verify = NULL,
1353                 .wrapper = mlx4_QUERY_SRQ_wrapper
1354         },
1355         {
1356                 .opcode = MLX4_CMD_ARM_SRQ,
1357                 .has_inbox = false,
1358                 .has_outbox = false,
1359                 .out_is_imm = false,
1360                 .encode_slave_id = false,
1361                 .verify = NULL,
1362                 .wrapper = mlx4_ARM_SRQ_wrapper
1363         },
1364         {
1365                 .opcode = MLX4_CMD_RST2INIT_QP,
1366                 .has_inbox = true,
1367                 .has_outbox = false,
1368                 .out_is_imm = false,
1369                 .encode_slave_id = true,
1370                 .verify = NULL,
1371                 .wrapper = mlx4_RST2INIT_QP_wrapper
1372         },
1373         {
1374                 .opcode = MLX4_CMD_INIT2INIT_QP,
1375                 .has_inbox = true,
1376                 .has_outbox = false,
1377                 .out_is_imm = false,
1378                 .encode_slave_id = false,
1379                 .verify = NULL,
1380                 .wrapper = mlx4_INIT2INIT_QP_wrapper
1381         },
1382         {
1383                 .opcode = MLX4_CMD_INIT2RTR_QP,
1384                 .has_inbox = true,
1385                 .has_outbox = false,
1386                 .out_is_imm = false,
1387                 .encode_slave_id = false,
1388                 .verify = NULL,
1389                 .wrapper = mlx4_INIT2RTR_QP_wrapper
1390         },
1391         {
1392                 .opcode = MLX4_CMD_RTR2RTS_QP,
1393                 .has_inbox = true,
1394                 .has_outbox = false,
1395                 .out_is_imm = false,
1396                 .encode_slave_id = false,
1397                 .verify = NULL,
1398                 .wrapper = mlx4_RTR2RTS_QP_wrapper
1399         },
1400         {
1401                 .opcode = MLX4_CMD_RTS2RTS_QP,
1402                 .has_inbox = true,
1403                 .has_outbox = false,
1404                 .out_is_imm = false,
1405                 .encode_slave_id = false,
1406                 .verify = NULL,
1407                 .wrapper = mlx4_RTS2RTS_QP_wrapper
1408         },
1409         {
1410                 .opcode = MLX4_CMD_SQERR2RTS_QP,
1411                 .has_inbox = true,
1412                 .has_outbox = false,
1413                 .out_is_imm = false,
1414                 .encode_slave_id = false,
1415                 .verify = NULL,
1416                 .wrapper = mlx4_SQERR2RTS_QP_wrapper
1417         },
1418         {
1419                 .opcode = MLX4_CMD_2ERR_QP,
1420                 .has_inbox = false,
1421                 .has_outbox = false,
1422                 .out_is_imm = false,
1423                 .encode_slave_id = false,
1424                 .verify = NULL,
1425                 .wrapper = mlx4_GEN_QP_wrapper
1426         },
1427         {
1428                 .opcode = MLX4_CMD_RTS2SQD_QP,
1429                 .has_inbox = false,
1430                 .has_outbox = false,
1431                 .out_is_imm = false,
1432                 .encode_slave_id = false,
1433                 .verify = NULL,
1434                 .wrapper = mlx4_GEN_QP_wrapper
1435         },
1436         {
1437                 .opcode = MLX4_CMD_SQD2SQD_QP,
1438                 .has_inbox = true,
1439                 .has_outbox = false,
1440                 .out_is_imm = false,
1441                 .encode_slave_id = false,
1442                 .verify = NULL,
1443                 .wrapper = mlx4_SQD2SQD_QP_wrapper
1444         },
1445         {
1446                 .opcode = MLX4_CMD_SQD2RTS_QP,
1447                 .has_inbox = true,
1448                 .has_outbox = false,
1449                 .out_is_imm = false,
1450                 .encode_slave_id = false,
1451                 .verify = NULL,
1452                 .wrapper = mlx4_SQD2RTS_QP_wrapper
1453         },
1454         {
1455                 .opcode = MLX4_CMD_2RST_QP,
1456                 .has_inbox = false,
1457                 .has_outbox = false,
1458                 .out_is_imm = false,
1459                 .encode_slave_id = false,
1460                 .verify = NULL,
1461                 .wrapper = mlx4_2RST_QP_wrapper
1462         },
1463         {
1464                 .opcode = MLX4_CMD_QUERY_QP,
1465                 .has_inbox = false,
1466                 .has_outbox = true,
1467                 .out_is_imm = false,
1468                 .encode_slave_id = false,
1469                 .verify = NULL,
1470                 .wrapper = mlx4_GEN_QP_wrapper
1471         },
1472         {
1473                 .opcode = MLX4_CMD_SUSPEND_QP,
1474                 .has_inbox = false,
1475                 .has_outbox = false,
1476                 .out_is_imm = false,
1477                 .encode_slave_id = false,
1478                 .verify = NULL,
1479                 .wrapper = mlx4_GEN_QP_wrapper
1480         },
1481         {
1482                 .opcode = MLX4_CMD_UNSUSPEND_QP,
1483                 .has_inbox = false,
1484                 .has_outbox = false,
1485                 .out_is_imm = false,
1486                 .encode_slave_id = false,
1487                 .verify = NULL,
1488                 .wrapper = mlx4_GEN_QP_wrapper
1489         },
1490         {
1491                 .opcode = MLX4_CMD_UPDATE_QP,
1492                 .has_inbox = true,
1493                 .has_outbox = false,
1494                 .out_is_imm = false,
1495                 .encode_slave_id = false,
1496                 .verify = NULL,
1497                 .wrapper = mlx4_UPDATE_QP_wrapper
1498         },
1499         {
1500                 .opcode = MLX4_CMD_GET_OP_REQ,
1501                 .has_inbox = false,
1502                 .has_outbox = false,
1503                 .out_is_imm = false,
1504                 .encode_slave_id = false,
1505                 .verify = NULL,
1506                 .wrapper = mlx4_CMD_EPERM_wrapper,
1507         },
1508         {
1509                 .opcode = MLX4_CMD_ALLOCATE_VPP,
1510                 .has_inbox = false,
1511                 .has_outbox = true,
1512                 .out_is_imm = false,
1513                 .encode_slave_id = false,
1514                 .verify = NULL,
1515                 .wrapper = mlx4_CMD_EPERM_wrapper,
1516         },
1517         {
1518                 .opcode = MLX4_CMD_SET_VPORT_QOS,
1519                 .has_inbox = false,
1520                 .has_outbox = true,
1521                 .out_is_imm = false,
1522                 .encode_slave_id = false,
1523                 .verify = NULL,
1524                 .wrapper = mlx4_CMD_EPERM_wrapper,
1525         },
1526         {
1527                 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1528                 .has_inbox = false,
1529                 .has_outbox = false,
1530                 .out_is_imm = false,
1531                 .encode_slave_id = false,
1532                 .verify = NULL, /* XXX verify: only demux can do this */
1533                 .wrapper = NULL
1534         },
1535         {
1536                 .opcode = MLX4_CMD_MAD_IFC,
1537                 .has_inbox = true,
1538                 .has_outbox = true,
1539                 .out_is_imm = false,
1540                 .encode_slave_id = false,
1541                 .verify = NULL,
1542                 .wrapper = mlx4_MAD_IFC_wrapper
1543         },
1544         {
1545                 .opcode = MLX4_CMD_MAD_DEMUX,
1546                 .has_inbox = false,
1547                 .has_outbox = false,
1548                 .out_is_imm = false,
1549                 .encode_slave_id = false,
1550                 .verify = NULL,
1551                 .wrapper = mlx4_CMD_EPERM_wrapper
1552         },
1553         {
1554                 .opcode = MLX4_CMD_QUERY_IF_STAT,
1555                 .has_inbox = false,
1556                 .has_outbox = true,
1557                 .out_is_imm = false,
1558                 .encode_slave_id = false,
1559                 .verify = NULL,
1560                 .wrapper = mlx4_QUERY_IF_STAT_wrapper
1561         },
1562         {
1563                 .opcode = MLX4_CMD_ACCESS_REG,
1564                 .has_inbox = true,
1565                 .has_outbox = true,
1566                 .out_is_imm = false,
1567                 .encode_slave_id = false,
1568                 .verify = NULL,
1569                 .wrapper = mlx4_ACCESS_REG_wrapper,
1570         },
1571         {
1572                 .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1573                 .has_inbox = false,
1574                 .has_outbox = false,
1575                 .out_is_imm = false,
1576                 .encode_slave_id = false,
1577                 .verify = NULL,
1578                 .wrapper = mlx4_CMD_EPERM_wrapper,
1579         },
1580         /* Native multicast commands are not available for guests */
1581         {
1582                 .opcode = MLX4_CMD_QP_ATTACH,
1583                 .has_inbox = true,
1584                 .has_outbox = false,
1585                 .out_is_imm = false,
1586                 .encode_slave_id = false,
1587                 .verify = NULL,
1588                 .wrapper = mlx4_QP_ATTACH_wrapper
1589         },
1590         {
1591                 .opcode = MLX4_CMD_PROMISC,
1592                 .has_inbox = false,
1593                 .has_outbox = false,
1594                 .out_is_imm = false,
1595                 .encode_slave_id = false,
1596                 .verify = NULL,
1597                 .wrapper = mlx4_PROMISC_wrapper
1598         },
1599         /* Ethernet specific commands */
1600         {
1601                 .opcode = MLX4_CMD_SET_VLAN_FLTR,
1602                 .has_inbox = true,
1603                 .has_outbox = false,
1604                 .out_is_imm = false,
1605                 .encode_slave_id = false,
1606                 .verify = NULL,
1607                 .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1608         },
1609         {
1610                 .opcode = MLX4_CMD_SET_MCAST_FLTR,
1611                 .has_inbox = false,
1612                 .has_outbox = false,
1613                 .out_is_imm = false,
1614                 .encode_slave_id = false,
1615                 .verify = NULL,
1616                 .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1617         },
1618         {
1619                 .opcode = MLX4_CMD_DUMP_ETH_STATS,
1620                 .has_inbox = false,
1621                 .has_outbox = true,
1622                 .out_is_imm = false,
1623                 .encode_slave_id = false,
1624                 .verify = NULL,
1625                 .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1626         },
1627         {
1628                 .opcode = MLX4_CMD_INFORM_FLR_DONE,
1629                 .has_inbox = false,
1630                 .has_outbox = false,
1631                 .out_is_imm = false,
1632                 .encode_slave_id = false,
1633                 .verify = NULL,
1634                 .wrapper = NULL
1635         },
1636         /* flow steering commands */
1637         {
1638                 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1639                 .has_inbox = true,
1640                 .has_outbox = false,
1641                 .out_is_imm = true,
1642                 .encode_slave_id = false,
1643                 .verify = NULL,
1644                 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1645         },
1646         {
1647                 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1648                 .has_inbox = false,
1649                 .has_outbox = false,
1650                 .out_is_imm = false,
1651                 .encode_slave_id = false,
1652                 .verify = NULL,
1653                 .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1654         },
1655         {
1656                 .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1657                 .has_inbox = false,
1658                 .has_outbox = false,
1659                 .out_is_imm = false,
1660                 .encode_slave_id = false,
1661                 .verify = NULL,
1662                 .wrapper = mlx4_CMD_EPERM_wrapper
1663         },
1664         {
1665                 .opcode = MLX4_CMD_VIRT_PORT_MAP,
1666                 .has_inbox = false,
1667                 .has_outbox = false,
1668                 .out_is_imm = false,
1669                 .encode_slave_id = false,
1670                 .verify = NULL,
1671                 .wrapper = mlx4_CMD_EPERM_wrapper
1672         },
1673 };
1674
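/*
 * mlx4_master_process_vhcr - execute a virtual HCR command posted by a slave.
 *
 * Summary of the flow implemented below: DMA the slave's vHCR into the
 * master's buffer (unless an in_vhcr was passed in directly), decode the
 * opcode/modifier/token fields, look the opcode up in the cmd_info[] table
 * above, DMA in the inbox mailbox if the command uses one, run the optional
 * verify() hook, then either call the command's wrapper() or pass the command
 * through to the firmware with __mlx4_cmd().  The outbox (on success) and the
 * vHCR status are then DMA'd back to the slave, and a command-completion EQE
 * is generated when the slave requested one via the e_bit.
 */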
1675 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1676                                     struct mlx4_vhcr_cmd *in_vhcr)
1677 {
1678         struct mlx4_priv *priv = mlx4_priv(dev);
1679         struct mlx4_cmd_info *cmd = NULL;
1680         struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1681         struct mlx4_vhcr *vhcr;
1682         struct mlx4_cmd_mailbox *inbox = NULL;
1683         struct mlx4_cmd_mailbox *outbox = NULL;
1684         u64 in_param;
1685         u64 out_param;
1686         int ret = 0;
1687         int i;
1688         int err = 0;
1689
1690         /* Create sw representation of Virtual HCR */
1691         vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1692         if (!vhcr)
1693                 return -ENOMEM;
1694
1695         /* DMA in the vHCR */
1696         if (!in_vhcr) {
1697                 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1698                                       priv->mfunc.master.slave_state[slave].vhcr_dma,
1699                                       ALIGN(sizeof(struct mlx4_vhcr_cmd),
1700                                             MLX4_ACCESS_MEM_ALIGN), 1);
1701                 if (ret) {
1702                         if (!(dev->persist->state &
1703                             MLX4_DEVICE_STATE_INTERNAL_ERROR))
1704                                 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1705                                          __func__, ret);
1706                         kfree(vhcr);
1707                         return ret;
1708                 }
1709         }
1710
1711         /* Fill SW VHCR fields */
1712         vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1713         vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1714         vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1715         vhcr->token = be16_to_cpu(vhcr_cmd->token);
1716         vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1717         vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1718         vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1719
1720         /* Lookup command */
1721         for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1722                 if (vhcr->op == cmd_info[i].opcode) {
1723                         cmd = &cmd_info[i];
1724                         break;
1725                 }
1726         }
1727         if (!cmd) {
1728                 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1729                          vhcr->op, slave);
1730                 vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1731                 goto out_status;
1732         }
1733
1734         /* Read inbox */
1735         if (cmd->has_inbox) {
1736                 vhcr->in_param &= INBOX_MASK;
1737                 inbox = mlx4_alloc_cmd_mailbox(dev);
1738                 if (IS_ERR(inbox)) {
1739                         vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1740                         inbox = NULL;
1741                         goto out_status;
1742                 }
1743
1744                 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1745                                       vhcr->in_param,
1746                                       MLX4_MAILBOX_SIZE, 1);
1747                 if (ret) {
1748                         if (!(dev->persist->state &
1749                             MLX4_DEVICE_STATE_INTERNAL_ERROR))
1750                                 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1751                                          __func__, cmd->opcode);
1752                         vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1753                         goto out_status;
1754                 }
1755         }
1756
1757         /* Apply permission and bounds checks if applicable */
1758         if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1759                 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1760                           vhcr->op, slave, vhcr->in_modifier);
1761                 vhcr_cmd->status = CMD_STAT_BAD_OP;
1762                 goto out_status;
1763         }
1764
1765         /* Allocate outbox */
1766         if (cmd->has_outbox) {
1767                 outbox = mlx4_alloc_cmd_mailbox(dev);
1768                 if (IS_ERR(outbox)) {
1769                         vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1770                         outbox = NULL;
1771                         goto out_status;
1772                 }
1773         }
1774
1775         /* Execute the command! */
1776         if (cmd->wrapper) {
1777                 err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1778                                    cmd);
1779                 if (cmd->out_is_imm)
1780                         vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1781         } else {
1782                 in_param = cmd->has_inbox ? (u64) inbox->dma :
1783                         vhcr->in_param;
1784                 out_param = cmd->has_outbox ? (u64) outbox->dma :
1785                         vhcr->out_param;
1786                 err = __mlx4_cmd(dev, in_param, &out_param,
1787                                  cmd->out_is_imm, vhcr->in_modifier,
1788                                  vhcr->op_modifier, vhcr->op,
1789                                  MLX4_CMD_TIME_CLASS_A,
1790                                  MLX4_CMD_NATIVE);
1791
1792                 if (cmd->out_is_imm) {
1793                         vhcr->out_param = out_param;
1794                         vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1795                 }
1796         }
1797
1798         if (err) {
1799                 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1800                         mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1801                                   vhcr->op, slave, vhcr->errno, err);
1802                 vhcr_cmd->status = mlx4_errno_to_status(err);
1803                 goto out_status;
1804         }
1805
1806
1807         /* Write outbox if command completed successfully */
1808         if (cmd->has_outbox && !vhcr_cmd->status) {
1809                 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1810                                       vhcr->out_param,
1811                                       MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1812                 if (ret) {
1813                         /* If we failed to write back the outbox after the
1814                          * command was successfully executed, we must fail this
1815                          * slave, as it is now in an undefined state */
1816                         if (!(dev->persist->state &
1817                             MLX4_DEVICE_STATE_INTERNAL_ERROR))
1818                                 mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
1819                         goto out;
1820                 }
1821         }
1822
1823 out_status:
1824         /* DMA back vhcr result */
1825         if (!in_vhcr) {
1826                 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1827                                       priv->mfunc.master.slave_state[slave].vhcr_dma,
1828                                       ALIGN(sizeof(struct mlx4_vhcr),
1829                                             MLX4_ACCESS_MEM_ALIGN),
1830                                       MLX4_CMD_WRAPPED);
1831                 if (ret)
1832                         mlx4_err(dev, "%s: Failed writing vhcr result\n",
1833                                  __func__);
1834                 else if (vhcr->e_bit &&
1835                          mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1836                                 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1837                                           slave);
1838         }
1839
1840 out:
1841         kfree(vhcr);
1842         mlx4_free_cmd_mailbox(dev, inbox);
1843         mlx4_free_cmd_mailbox(dev, outbox);
1844         return ret;
1845 }
1846
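/*
 * mlx4_master_immediate_activate_vlan_qos - apply new VLAN/QoS admin settings
 * to an already-active VF without waiting for it to restart.
 *
 * If the operational vport state already matches the admin state this is a
 * no-op.  If the slave is not active or UPDATE_QP is not supported, -1 is
 * returned so the caller can fall back to the admin directive only.
 * Otherwise a new VLAN index is registered (unless the VF is in VGT mode,
 * i.e. default_vlan == MLX4_VGT), the operational state is updated, and a
 * mlx4_vf_immed_vlan_work item is queued on the comm workqueue so that every
 * QP owned by the slave is updated via the UPDATE_QP command.
 */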
1847 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1848                                             int slave, int port)
1849 {
1850         struct mlx4_vport_oper_state *vp_oper;
1851         struct mlx4_vport_state *vp_admin;
1852         struct mlx4_vf_immed_vlan_work *work;
1853         struct mlx4_dev *dev = &priv->dev;
1854         int err;
1855         int admin_vlan_ix = NO_INDX;
1856
1857         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1858         vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1859
1860         if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1861             vp_oper->state.default_qos == vp_admin->default_qos &&
1862             vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
1863             vp_oper->state.qos_vport == vp_admin->qos_vport)
1864                 return 0;
1865
1866         if (!(priv->mfunc.master.slave_state[slave].active &&
1867               dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1868                 /* even if the UPDATE_QP command isn't supported, we still want
1869                  * to set this VF link according to the admin directive
1870                  */
1871                 return -1;
1872         }
1873
1874         mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1875                  slave, port);
1876         mlx4_dbg(dev, "vlan %d QoS %d link down\n",
1877                  vp_admin->default_vlan, vp_admin->default_qos);
1878
1879         work = kzalloc(sizeof(*work), GFP_KERNEL);
1880         if (!work)
1881                 return -ENOMEM;
1882
1883         if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1884                 if (MLX4_VGT != vp_admin->default_vlan) {
1885                         err = __mlx4_register_vlan(&priv->dev, port,
1886                                                    vp_admin->default_vlan,
1887                                                    &admin_vlan_ix);
1888                         if (err) {
1889                                 kfree(work);
1890                                 mlx4_warn(&priv->dev,
1891                                           "No vlan resources slave %d, port %d\n",
1892                                           slave, port);
1893                                 return err;
1894                         }
1895                 } else {
1896                         admin_vlan_ix = NO_INDX;
1897                 }
1898                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1899                 mlx4_dbg(&priv->dev,
1900                          "alloc vlan %d idx  %d slave %d port %d\n",
1901                          (int)(vp_admin->default_vlan),
1902                          admin_vlan_ix, slave, port);
1903         }
1904
1905         /* save original vlan ix and vlan id */
1906         work->orig_vlan_id = vp_oper->state.default_vlan;
1907         work->orig_vlan_ix = vp_oper->vlan_idx;
1908
1909         /* handle new qos */
1910         if (vp_oper->state.default_qos != vp_admin->default_qos)
1911                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1912
1913         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1914                 vp_oper->vlan_idx = admin_vlan_ix;
1915
1916         vp_oper->state.default_vlan = vp_admin->default_vlan;
1917         vp_oper->state.default_qos = vp_admin->default_qos;
1918         vp_oper->state.vlan_proto = vp_admin->vlan_proto;
1919         vp_oper->state.qos_vport = vp_admin->qos_vport;
1920
1921         if (1 /* vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE */)
1922                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1923
1924         /* iterate over QPs owned by this slave, using UPDATE_QP */
1925         work->port = port;
1926         work->slave = slave;
1927         work->qos = vp_oper->state.default_qos;
1928         work->qos_vport = vp_oper->state.qos_vport;
1929         work->vlan_id = vp_oper->state.default_vlan;
1930         work->vlan_ix = vp_oper->vlan_idx;
1931         work->vlan_proto = vp_oper->state.vlan_proto;
1932         work->priv = priv;
1933         INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1934         queue_work(priv->mfunc.master.comm_wq, &work->work);
1935
1936         return 0;
1937 }
1938
1939 static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1940 {
1941         struct mlx4_qos_manager *port_qos_ctl;
1942         struct mlx4_priv *priv = mlx4_priv(dev);
1943
1944         port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1945         bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1946
1947         /* Enable only the default priority in the PF init routine */
1948         set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1949 }
1950
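/*
 * mlx4_allocate_port_vpps - divide the port's available VPPs (virtual port
 * priorities) evenly among the QoS priorities enabled in the port's priority
 * bitmap, program the result with ALLOCATE_VPP, and then re-query the actual
 * allocation for the debug printout below.
 */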
1951 static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1952 {
1953         int i;
1954         int err;
1955         int num_vfs;
1956         u16 available_vpp;
1957         u8 vpp_param[MLX4_NUM_UP];
1958         struct mlx4_qos_manager *port_qos;
1959         struct mlx4_priv *priv = mlx4_priv(dev);
1960
1961         err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1962         if (err) {
1963                 mlx4_info(dev, "Failed to query available VPPs\n");
1964                 return;
1965         }
1966
1967         port_qos = &priv->mfunc.master.qos_ctl[port];
1968         num_vfs = (available_vpp /
1969                    bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1970
1971         for (i = 0; i < MLX4_NUM_UP; i++) {
1972                 if (test_bit(i, port_qos->priority_bm))
1973                         vpp_param[i] = num_vfs;
1974         }
1975
1976         err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1977         if (err) {
1978                 mlx4_info(dev, "Failed allocating VPPs\n");
1979                 return;
1980         }
1981
1982         /* Query the actually allocated VPPs, just to make sure */
1983         err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1984         if (err) {
1985                 mlx4_info(dev, "Failed to query available VPPs\n");
1986                 return;
1987         }
1988
1989         port_qos->num_of_qos_vfs = num_vfs;
1990         mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1991
1992         for (i = 0; i < MLX4_NUM_UP; i++)
1993                 mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1994                          vpp_param[i]);
1995 }
1996
1997 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1998 {
1999         int port, err;
2000         struct mlx4_vport_state *vp_admin;
2001         struct mlx4_vport_oper_state *vp_oper;
2002         struct mlx4_slave_state *slave_state =
2003                 &priv->mfunc.master.slave_state[slave];
2004         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2005                         &priv->dev, slave);
2006         int min_port = find_first_bit(actv_ports.ports,
2007                                       priv->dev.caps.num_ports) + 1;
2008         int max_port = min_port - 1 +
2009                 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2010
2011         for (port = min_port; port <= max_port; port++) {
2012                 if (!test_bit(port - 1, actv_ports.ports))
2013                         continue;
2014                 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2015                         priv->mfunc.master.vf_admin[slave].enable_smi[port];
2016                 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2017                 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2018                 if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
2019                     slave_state->vst_qinq_supported) {
2020                         vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
2021                         vp_oper->state.default_vlan = vp_admin->default_vlan;
2022                         vp_oper->state.default_qos  = vp_admin->default_qos;
2023                 }
2024                 vp_oper->state.mac        = vp_admin->mac;
2025                 vp_oper->state.spoofchk   = vp_admin->spoofchk;
2026                 vp_oper->state.tx_rate    = vp_admin->tx_rate;
2027                 vp_oper->state.qos_vport  = vp_admin->qos_vport;
2028                 vp_oper->state.guid       = vp_admin->guid;
2029
2030                 if (MLX4_VGT != vp_admin->default_vlan) {
2031                         err = __mlx4_register_vlan(&priv->dev, port,
2032                                                    vp_admin->default_vlan, &(vp_oper->vlan_idx));
2033                         if (err) {
2034                                 vp_oper->vlan_idx = NO_INDX;
2035                                 vp_oper->state.default_vlan = MLX4_VGT;
2036                                 vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
2037                                 mlx4_warn(&priv->dev,
2038                                           "No vlan resources slave %d, port %d\n",
2039                                           slave, port);
2040                                 return err;
2041                         }
2042                         mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2043                                  (int)(vp_oper->state.default_vlan),
2044                                  vp_oper->vlan_idx, slave, port);
2045                 }
2046                 if (vp_admin->spoofchk) {
2047                         vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2048                                                                port,
2049                                                                vp_admin->mac);
2050                         if (0 > vp_oper->mac_idx) {
2051                                 err = vp_oper->mac_idx;
2052                                 vp_oper->mac_idx = NO_INDX;
2053                                 mlx4_warn(&priv->dev,
2054                                           "No mac resources slave %d, port %d\n",
2055                                           slave, port);
2056                                 return err;
2057                         }
2058                         mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2059                                  (unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2060                 }
2061         }
2062         return 0;
2063 }
2064
2065 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2066 {
2067         int port;
2068         struct mlx4_vport_oper_state *vp_oper;
2069         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2070                         &priv->dev, slave);
2071         int min_port = find_first_bit(actv_ports.ports,
2072                                       priv->dev.caps.num_ports) + 1;
2073         int max_port = min_port - 1 +
2074                 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2075
2076
2077         for (port = min_port; port <= max_port; port++) {
2078                 if (!test_bit(port - 1, actv_ports.ports))
2079                         continue;
2080                 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2081                         MLX4_VF_SMI_DISABLED;
2082                 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2083                 if (NO_INDX != vp_oper->vlan_idx) {
2084                         __mlx4_unregister_vlan(&priv->dev,
2085                                                port, vp_oper->state.default_vlan);
2086                         vp_oper->vlan_idx = NO_INDX;
2087                 }
2088                 if (NO_INDX != vp_oper->mac_idx) {
2089                         __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2090                         vp_oper->mac_idx = NO_INDX;
2091                 }
2092         }
2093         return;
2094 }
2095
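/*
 * mlx4_master_do_cmd - handle a single comm-channel command from a slave.
 *
 * The slave hands the master its 64-bit vHCR DMA address 16 bits at a time,
 * so the expected command sequence is a small state machine:
 *
 *   RESET -> VHCR0 (bits 63:48) -> VHCR1 (bits 47:32) -> VHCR2 (bits 31:16)
 *         -> VHCR_EN (bits 15:0, slave becomes active) -> VHCR_POST ...
 *
 * Any out-of-order command, a bad toggle bit, or a failure while processing
 * the vHCR resets the slave (see the reset_slave label below).  The reply
 * written to the slave_read word carries the new toggle in bit 31.
 */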
2096 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2097                                u16 param, u8 toggle)
2098 {
2099         struct mlx4_priv *priv = mlx4_priv(dev);
2100         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2101         u32 reply;
2102         u8 is_going_down = 0;
2103         int i;
2104         unsigned long flags;
2105
2106         slave_state[slave].comm_toggle ^= 1;
2107         reply = (u32) slave_state[slave].comm_toggle << 31;
2108         if (toggle != slave_state[slave].comm_toggle) {
2109                 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2110                           toggle, slave);
2111                 goto reset_slave;
2112         }
2113         if (cmd == MLX4_COMM_CMD_RESET) {
2114                 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2115                 slave_state[slave].active = false;
2116                 slave_state[slave].old_vlan_api = false;
2117                 slave_state[slave].vst_qinq_supported = false;
2118                 mlx4_master_deactivate_admin_state(priv, slave);
2119                 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2120                                 slave_state[slave].event_eq[i].eqn = -1;
2121                                 slave_state[slave].event_eq[i].token = 0;
2122                 }
2123                 /* Check if we are in the middle of the FLR process;
2124                  * if so, return "retry" status to the slave. */
2125                 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2126                         goto inform_slave_state;
2127
2128                 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2129
2130                 /* write the version in the event field */
2131                 reply |= mlx4_comm_get_version();
2132
2133                 goto reset_slave;
2134         }
2135         /* Command from slave in the middle of FLR */
2136         if (cmd != MLX4_COMM_CMD_RESET &&
2137             MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2138                 mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
2139                           slave, cmd);
2140                 return;
2141         }
2142
2143         switch (cmd) {
2144         case MLX4_COMM_CMD_VHCR0:
2145                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2146                         goto reset_slave;
2147                 slave_state[slave].vhcr_dma = ((u64) param) << 48;
2148                 priv->mfunc.master.slave_state[slave].cookie = 0;
2149                 break;
2150         case MLX4_COMM_CMD_VHCR1:
2151                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2152                         goto reset_slave;
2153                 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2154                 break;
2155         case MLX4_COMM_CMD_VHCR2:
2156                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2157                         goto reset_slave;
2158                 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2159                 break;
2160         case MLX4_COMM_CMD_VHCR_EN:
2161                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2162                         goto reset_slave;
2163                 slave_state[slave].vhcr_dma |= param;
2164                 if (mlx4_master_activate_admin_state(priv, slave))
2165                                 goto reset_slave;
2166                 slave_state[slave].active = true;
2167                 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2168                 break;
2169         case MLX4_COMM_CMD_VHCR_POST:
2170                 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2171                     (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2172                         mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2173                                   slave, cmd, slave_state[slave].last_cmd);
2174                         goto reset_slave;
2175                 }
2176
2177                 mutex_lock(&priv->cmd.slave_cmd_mutex);
2178                 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2179                         mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2180                                  slave);
2181                         mutex_unlock(&priv->cmd.slave_cmd_mutex);
2182                         goto reset_slave;
2183                 }
2184                 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2185                 break;
2186         default:
2187                 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2188                 goto reset_slave;
2189         }
2190         spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2191         if (!slave_state[slave].is_slave_going_down)
2192                 slave_state[slave].last_cmd = cmd;
2193         else
2194                 is_going_down = 1;
2195         spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2196         if (is_going_down) {
2197                 mlx4_warn(dev, "Slave is going down, aborting command(%d) execution from slave:%d\n",
2198                           cmd, slave);
2199                 return;
2200         }
2201         __raw_writel((__force u32) cpu_to_be32(reply),
2202                      &priv->mfunc.comm[slave].slave_read);
2203         mmiowb();
2204
2205         return;
2206
2207 reset_slave:
2208         /* cleanup any slave resources */
2209         if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2210                 mlx4_delete_all_resources_for_slave(dev, slave);
2211
2212         if (cmd != MLX4_COMM_CMD_RESET) {
2213                 mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2214                           slave, cmd);
2215                 /* Turn on internal error, letting the slave reset itself immediately;
2216                  * otherwise it might take until the command timeout is reached
2217                  */
2218                 reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2219         }
2220
2221         spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2222         if (!slave_state[slave].is_slave_going_down)
2223                 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2224         spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2225         /* With the slave in the middle of FLR, no need to clean resources again. */
2226 inform_slave_state:
2227         memset(&slave_state[slave].event_eq, 0,
2228                sizeof(struct mlx4_slave_event_eq_info));
2229         __raw_writel((__force u32) cpu_to_be32(reply),
2230                      &priv->mfunc.comm[slave].slave_read);
2231         wmb();
2232 }
2233
2234 /* master command processing */
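/*
 * mlx4_master_comm_channel - work handler for the master comm channel.
 * Scans the armed bit vector (comm_arm_bit_vector) for slaves with a pending
 * command, reads each such slave's slave_write word and dispatches it to
 * mlx4_master_do_cmd().  The 32-bit comm word is laid out as: bit 31 =
 * toggle, bits 23:16 = command opcode, bits 15:0 = parameter (see the
 * comm_cmd >> 16 / & 0xffff decode below).  The channel is re-armed with
 * mlx4_ARM_COMM_CHANNEL() at the end.
 */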
2235 void mlx4_master_comm_channel(struct work_struct *work)
2236 {
2237         struct mlx4_mfunc_master_ctx *master =
2238                 container_of(work,
2239                              struct mlx4_mfunc_master_ctx,
2240                              comm_work);
2241         struct mlx4_mfunc *mfunc =
2242                 container_of(master, struct mlx4_mfunc, master);
2243         struct mlx4_priv *priv =
2244                 container_of(mfunc, struct mlx4_priv, mfunc);
2245         struct mlx4_dev *dev = &priv->dev;
2246         __be32 *bit_vec;
2247         u32 comm_cmd;
2248         u32 vec;
2249         int i, j, slave;
2250         int toggle;
2251         int served = 0;
2252         int reported = 0;
2253         u32 slt;
2254
2255         bit_vec = master->comm_arm_bit_vector;
2256         for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2257                 vec = be32_to_cpu(bit_vec[i]);
2258                 for (j = 0; j < 32; j++) {
2259                         if (!(vec & (1 << j)))
2260                                 continue;
2261                         ++reported;
2262                         slave = (i * 32) + j;
2263                         comm_cmd = swab32(readl(
2264                                           &mfunc->comm[slave].slave_write));
2265                         slt = swab32(readl(&mfunc->comm[slave].slave_read))
2266                                      >> 31;
2267                         toggle = comm_cmd >> 31;
2268                         if (toggle != slt) {
2269                                 if (master->slave_state[slave].comm_toggle
2270                                     != slt) {
2271                                         pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resyncing.\n",
2272                                                 slave, slt,
2273                                                 master->slave_state[slave].comm_toggle);
2274                                         master->slave_state[slave].comm_toggle =
2275                                                 slt;
2276                                 }
2277                                 mlx4_master_do_cmd(dev, slave,
2278                                                    comm_cmd >> 16 & 0xff,
2279                                                    comm_cmd & 0xffff, toggle);
2280                                 ++served;
2281                         }
2282                 }
2283         }
2284
2285         if (reported && reported != served)
2286                 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2287                           reported, served);
2288
2289         if (mlx4_ARM_COMM_CHANNEL(dev))
2290                 mlx4_warn(dev, "Failed to arm comm channel events\n");
2291 }
2292
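/*
 * sync_toggles - slave-side helper that waits for the toggle bit (bit 31) in
 * the comm channel's slave_read word to match the one in slave_write, then
 * records it in priv->cmd.comm_toggle.  A value of 0xffffffff in either word
 * is treated as the PCI device being temporarily offline.  If the toggles
 * never converge within the timeout (e.g. a previous VM left the channel
 * unsynced), both words are cleared and the toggle restarts from zero.
 */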
2293 static int sync_toggles(struct mlx4_dev *dev)
2294 {
2295         struct mlx4_priv *priv = mlx4_priv(dev);
2296         u32 wr_toggle;
2297         u32 rd_toggle;
2298         unsigned long end;
2299
2300         wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2301         if (wr_toggle == 0xffffffff)
2302                 end = jiffies + msecs_to_jiffies(30000);
2303         else
2304                 end = jiffies + msecs_to_jiffies(5000);
2305
2306         while (time_before(jiffies, end)) {
2307                 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2308                 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2309                         /* PCI might be offline */
2310                         msleep(100);
2311                         wr_toggle = swab32(readl(&priv->mfunc.comm->
2312                                            slave_write));
2313                         continue;
2314                 }
2315
2316                 if (rd_toggle >> 31 == wr_toggle >> 31) {
2317                         priv->cmd.comm_toggle = rd_toggle >> 31;
2318                         return 0;
2319                 }
2320
2321                 cond_resched();
2322         }
2323
2324         /*
2325          * We can reach here if, for example, the previous VM using this
2326          * function misbehaved and left the channel in an unsynced state. Fix
2327          * that here and give this VM a chance to use a properly
2328          * synced channel.
2329          */
2330         mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
2331         __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2332         __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2333         priv->cmd.comm_toggle = 0;
2334
2335         return 0;
2336 }
2337
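/*
 * mlx4_multi_func_init - set up the multi-function (SR-IOV) command path.
 * Maps the comm channel region; on the master it also allocates the
 * per-slave state, admin and oper vport arrays, initializes per-slave VLAN
 * filters and default vport values, optionally programs default QoS/VPPs,
 * and creates the "mlx4_comm" workqueue plus the resource tracker.  A slave
 * only needs to sync the comm-channel toggles.
 */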
2338 int mlx4_multi_func_init(struct mlx4_dev *dev)
2339 {
2340         struct mlx4_priv *priv = mlx4_priv(dev);
2341         struct mlx4_slave_state *s_state;
2342         int i, j, err, port;
2343
2344         if (mlx4_is_master(dev))
2345                 priv->mfunc.comm =
2346                 ioremap(pci_resource_start(dev->persist->pdev,
2347                                            priv->fw.comm_bar) +
2348                         priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2349         else
2350                 priv->mfunc.comm =
2351                 ioremap(pci_resource_start(dev->persist->pdev, 2) +
2352                         MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2353         if (!priv->mfunc.comm) {
2354                 mlx4_err(dev, "Couldn't map communication vector\n");
2355                 goto err_vhcr;
2356         }
2357
2358         if (mlx4_is_master(dev)) {
2359                 struct mlx4_vf_oper_state *vf_oper;
2360                 struct mlx4_vf_admin_state *vf_admin;
2361
2362                 priv->mfunc.master.slave_state =
2363                         kzalloc(dev->num_slaves *
2364                                 sizeof(struct mlx4_slave_state), GFP_KERNEL);
2365                 if (!priv->mfunc.master.slave_state)
2366                         goto err_comm;
2367
2368                 priv->mfunc.master.vf_admin =
2369                         kzalloc(dev->num_slaves *
2370                                 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2371                 if (!priv->mfunc.master.vf_admin)
2372                         goto err_comm_admin;
2373
2374                 priv->mfunc.master.vf_oper =
2375                         kzalloc(dev->num_slaves *
2376                                 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2377                 if (!priv->mfunc.master.vf_oper)
2378                         goto err_comm_oper;
2379
2380                 for (i = 0; i < dev->num_slaves; ++i) {
2381                         vf_admin = &priv->mfunc.master.vf_admin[i];
2382                         vf_oper = &priv->mfunc.master.vf_oper[i];
2383                         s_state = &priv->mfunc.master.slave_state[i];
2384                         s_state->last_cmd = MLX4_COMM_CMD_RESET;
2385                         s_state->vst_qinq_supported = false;
2386                         mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2387                         for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2388                                 s_state->event_eq[j].eqn = -1;
2389                         __raw_writel((__force u32) 0,
2390                                      &priv->mfunc.comm[i].slave_write);
2391                         __raw_writel((__force u32) 0,
2392                                      &priv->mfunc.comm[i].slave_read);
2393                         mmiowb();
2394                         for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2395                                 struct mlx4_vport_state *admin_vport;
2396                                 struct mlx4_vport_state *oper_vport;
2397
2398                                 s_state->vlan_filter[port] =
2399                                         kzalloc(sizeof(struct mlx4_vlan_fltr),
2400                                                 GFP_KERNEL);
2401                                 if (!s_state->vlan_filter[port]) {
2402                                         if (--port)
2403                                                 kfree(s_state->vlan_filter[port]);
2404                                         goto err_slaves;
2405                                 }
2406
2407                                 admin_vport = &vf_admin->vport[port];
2408                                 oper_vport = &vf_oper->vport[port].state;
2409                                 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2410                                 admin_vport->default_vlan = MLX4_VGT;
2411                                 oper_vport->default_vlan = MLX4_VGT;
2412                                 admin_vport->qos_vport =
2413                                                 MLX4_VPP_DEFAULT_VPORT;
2414                                 oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2415                                 admin_vport->vlan_proto = htons(ETH_P_8021Q);
2416                                 oper_vport->vlan_proto = htons(ETH_P_8021Q);
2417                                 vf_oper->vport[port].vlan_idx = NO_INDX;
2418                                 vf_oper->vport[port].mac_idx = NO_INDX;
2419                                 mlx4_set_random_admin_guid(dev, i, port);
2420                         }
2421                         spin_lock_init(&s_state->lock);
2422                 }
2423
2424                 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2425                         for (port = 1; port <= dev->caps.num_ports; port++) {
2426                                 if (mlx4_is_eth(dev, port)) {
2427                                         mlx4_set_default_port_qos(dev, port);
2428                                         mlx4_allocate_port_vpps(dev, port);
2429                                 }
2430                         }
2431                 }
2432
2433                 memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2434                 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2435                 INIT_WORK(&priv->mfunc.master.comm_work,
2436                           mlx4_master_comm_channel);
2437                 INIT_WORK(&priv->mfunc.master.slave_event_work,
2438                           mlx4_gen_slave_eqe);
2439                 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2440                           mlx4_master_handle_slave_flr);
2441                 spin_lock_init(&priv->mfunc.master.slave_state_lock);
2442                 spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2443                 priv->mfunc.master.comm_wq =
2444                         create_singlethread_workqueue("mlx4_comm");
2445                 if (!priv->mfunc.master.comm_wq)
2446                         goto err_slaves;
2447
2448                 if (mlx4_init_resource_tracker(dev))
2449                         goto err_thread;
2450
2451         } else {
2452                 err = sync_toggles(dev);
2453                 if (err) {
2454                         mlx4_err(dev, "Couldn't sync toggles\n");
2455                         goto err_comm;
2456                 }
2457         }
2458         return 0;
2459
2460 err_thread:
2461         flush_workqueue(priv->mfunc.master.comm_wq);
2462         destroy_workqueue(priv->mfunc.master.comm_wq);
2463 err_slaves:
2464         while (i--) {
2465                 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2466                         kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2467         }
2468         kfree(priv->mfunc.master.vf_oper);
2469 err_comm_oper:
2470         kfree(priv->mfunc.master.vf_admin);
2471 err_comm_admin:
2472         kfree(priv->mfunc.master.slave_state);
2473 err_comm:
2474         iounmap(priv->mfunc.comm);
2475         priv->mfunc.comm = NULL;
2476 err_vhcr:
2477         dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2478                           priv->mfunc.vhcr,
2479                           priv->mfunc.vhcr_dma);
2480         priv->mfunc.vhcr = NULL;
2481         return -ENOMEM;
2482 }
2483
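/*
 * mlx4_cmd_init - idempotent setup of the command interface.  Each piece
 * (base struct, HCR mapping, vHCR DMA page, mailbox pci_pool) is created
 * only if it does not already exist, and a corresponding MLX4_CMD_CLEANUP_*
 * flag is accumulated so that a failure midway tears down exactly what this
 * call created (see mlx4_cmd_cleanup()).
 */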
2484 int mlx4_cmd_init(struct mlx4_dev *dev)
2485 {
2486         struct mlx4_priv *priv = mlx4_priv(dev);
2487         int flags = 0;
2488
2489         if (!priv->cmd.initialized) {
2490                 init_rwsem(&priv->cmd.switch_sem);
2491                 mutex_init(&priv->cmd.slave_cmd_mutex);
2492                 sema_init(&priv->cmd.poll_sem, 1);
2493                 sema_init(&priv->cmd.event_sem, 0);
2494                 priv->cmd.use_events = 0;
2495                 priv->cmd.toggle     = 1;
2496                 priv->cmd.initialized = 1;
2497                 flags |= MLX4_CMD_CLEANUP_STRUCT;
2498         }
2499
2500         if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2501                 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2502                                         0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2503                 if (!priv->cmd.hcr) {
2504                         mlx4_err(dev, "Couldn't map command register\n");
2505                         goto err;
2506                 }
2507                 flags |= MLX4_CMD_CLEANUP_HCR;
2508         }
2509
2510         if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2511                 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2512                                                       PAGE_SIZE,
2513                                                       &priv->mfunc.vhcr_dma,
2514                                                       GFP_KERNEL);
2515                 if (!priv->mfunc.vhcr)
2516                         goto err;
2517
2518                 flags |= MLX4_CMD_CLEANUP_VHCR;
2519         }
2520
2521         if (!priv->cmd.pool) {
2522                 priv->cmd.pool = pci_pool_create("mlx4_cmd",
2523                                                  dev->persist->pdev,
2524                                                  MLX4_MAILBOX_SIZE,
2525                                                  MLX4_MAILBOX_SIZE, 0);
2526                 if (!priv->cmd.pool)
2527                         goto err;
2528
2529                 flags |= MLX4_CMD_CLEANUP_POOL;
2530         }
2531
2532         return 0;
2533
2534 err:
2535         mlx4_cmd_cleanup(dev, flags);
2536         return -ENOMEM;
2537 }
2538
2539 void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2540 {
2541         struct mlx4_priv *priv = mlx4_priv(dev);
2542         int slave;
2543         u32 slave_read;
2544
2545         /* If the comm channel has not yet been initialized,
2546          * skip reporting the internal error event to all
2547          * the communication channels.
2548          */
2549         if (!priv->mfunc.comm)
2550                 return;
2551
2552         /* Report an internal error event to all
2553          * communication channels.
2554          */
2555         for (slave = 0; slave < dev->num_slaves; slave++) {
2556                 slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2557                 slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2558                 __raw_writel((__force u32)cpu_to_be32(slave_read),
2559                              &priv->mfunc.comm[slave].slave_read);
2560                 /* Make sure that our comm channel write doesn't
2561                  * get mixed in with writes from another CPU.
2562                  */
2563                 mmiowb();
2564         }
2565 }
2566
2567 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2568 {
2569         struct mlx4_priv *priv = mlx4_priv(dev);
2570         int i, port;
2571
2572         if (mlx4_is_master(dev)) {
2573                 flush_workqueue(priv->mfunc.master.comm_wq);
2574                 destroy_workqueue(priv->mfunc.master.comm_wq);
2575                 for (i = 0; i < dev->num_slaves; i++) {
2576                         for (port = 1; port <= MLX4_MAX_PORTS; port++)
2577                                 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2578                 }
2579                 kfree(priv->mfunc.master.slave_state);
2580                 kfree(priv->mfunc.master.vf_admin);
2581                 kfree(priv->mfunc.master.vf_oper);
2582                 dev->num_slaves = 0;
2583         }
2584
2585         iounmap(priv->mfunc.comm);
2586         priv->mfunc.comm = NULL;
2587 }
2588
2589 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2590 {
2591         struct mlx4_priv *priv = mlx4_priv(dev);
2592
2593         if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2594                 pci_pool_destroy(priv->cmd.pool);
2595                 priv->cmd.pool = NULL;
2596         }
2597
2598         if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2599             (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2600                 iounmap(priv->cmd.hcr);
2601                 priv->cmd.hcr = NULL;
2602         }
2603         if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2604             (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2605                 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2606                                   priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2607                 priv->mfunc.vhcr = NULL;
2608         }
2609         if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2610                 priv->cmd.initialized = 0;
2611 }
2612
2613 /*
2614  * Switch to using events to issue FW commands (can only be called
2615  * after event queue for command events has been initialized).
2616  */
2617 int mlx4_cmd_use_events(struct mlx4_dev *dev)
2618 {
2619         struct mlx4_priv *priv = mlx4_priv(dev);
2620         int i;
2621
2622         if (priv->cmd.use_events != 0)
2623                 return 0;
2624
2625         priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2626                                    sizeof (struct mlx4_cmd_context),
2627                                    GFP_KERNEL);
2628         if (!priv->cmd.context)
2629                 return -ENOMEM;
2630
2631         down_write(&priv->cmd.switch_sem);
2632         for (i = 0; i < priv->cmd.max_cmds; ++i) {
2633                 priv->cmd.context[i].token = i;
2634                 priv->cmd.context[i].next  = i + 1;
2635                 /* To support fatal error flow, initialize all
2636                  * cmd contexts to allow simulating completions
2637                  * with complete() at any time.
2638                  */
2639                 init_completion(&priv->cmd.context[i].done);
2640         }
2641
2642         priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2643         priv->cmd.free_head = 0;
2644
2645         for (i = 0; i != priv->cmd.max_cmds; i++)
2646                 up(&priv->cmd.event_sem);
2647
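        /*
         * Descriptive note on the loop below: token_mask ends up as the
         * smallest power of two that is >= max_cmds, minus one, so command
         * tokens wrap within a contiguous range of low-order bits.
         */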
2648         for (priv->cmd.token_mask = 1;
2649              priv->cmd.token_mask < priv->cmd.max_cmds;
2650              priv->cmd.token_mask <<= 1)
2651                 ; /* nothing */
2652         --priv->cmd.token_mask;
2653
2654         down(&priv->cmd.poll_sem);
2655         priv->cmd.use_events = 1;
2656         up_write(&priv->cmd.switch_sem);
2657
2658         return 0;
2659 }
2660
2661 /*
2662  * Switch back to polling (used when shutting down the device)
2663  */
2664 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2665 {
2666         struct mlx4_priv *priv = mlx4_priv(dev);
2667         int i;
2668
2669         if (priv->cmd.use_events == 0)
2670                 return;
2671
2672         down_write(&priv->cmd.switch_sem);
2673         priv->cmd.use_events = 0;
2674
2675         for (i = 0; i < priv->cmd.max_cmds; ++i)
2676                 down(&priv->cmd.event_sem);
2677
2678         kfree(priv->cmd.context);
2679         priv->cmd.context = NULL;
2680
2681         up(&priv->cmd.poll_sem);
2682         up_write(&priv->cmd.switch_sem);
2683 }
2684
2685 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2686 {
2687         struct mlx4_cmd_mailbox *mailbox;
2688
2689         mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2690         if (!mailbox)
2691                 return ERR_PTR(-ENOMEM);
2692
2693         mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2694                                       &mailbox->dma);
2695         if (!mailbox->buf) {
2696                 kfree(mailbox);
2697                 return ERR_PTR(-ENOMEM);
2698         }
2699
2700         memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2701
2702         return mailbox;
2703 }
2704 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2705
2706 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2707                            struct mlx4_cmd_mailbox *mailbox)
2708 {
2709         if (!mailbox)
2710                 return;
2711
2712         pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2713         kfree(mailbox);
2714 }
2715 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
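/*
 * Illustrative usage sketch (not part of the driver): a typical caller
 * allocates a mailbox, checks it with IS_ERR(), fills mailbox->buf, passes
 * mailbox->dma to a command wrapper such as mlx4_cmd_box(), and frees the
 * mailbox afterwards.  "op" below is a placeholder opcode:
 *
 *      struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
 *
 *      if (IS_ERR(mailbox))
 *              return PTR_ERR(mailbox);
 *      ... fill mailbox->buf ...
 *      err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, op,
 *                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 *      mlx4_free_cmd_mailbox(dev, mailbox);
 */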
2716
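/*
 * The comm channel version word carries the interface revision in its
 * high byte and the channel version in its low byte.
 */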
2717 u32 mlx4_comm_get_version(void)
2718 {
2719          return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2720 }
2721
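/*
 * VF numbers are zero-based while slave indices are one-based (slave 0 is
 * the PF), so the conversion below is simply vf + 1; see
 * mlx4_get_vf_indx() for the inverse mapping.
 */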
2722 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2723 {
2724         if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2725                 mlx4_err(dev, "Bad vf number: %d (number of activated vfs: %d)\n",
2726                          vf, dev->persist->num_vfs);
2727                 return -EINVAL;
2728         }
2729
2730         return vf+1;
2731 }
2732
2733 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2734 {
2735         if (slave < 1 || slave > dev->persist->num_vfs) {
2736                 mlx4_err(dev,
2737                          "Bad slave number: %d (number of activated slaves: %lu)\n",
2738                          slave, dev->num_slaves);
2739                 return -EINVAL;
2740         }
2741         return slave - 1;
2742 }
2743
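/*
 * Fatal-error helper: force-complete every outstanding event-mode command
 * context with CMD_STAT_INTERNAL_ERR so that waiters are released instead
 * of blocking indefinitely.
 */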
2744 void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2745 {
2746         struct mlx4_priv *priv = mlx4_priv(dev);
2747         struct mlx4_cmd_context *context;
2748         int i;
2749
2750         spin_lock(&priv->cmd.context_lock);
2751         if (priv->cmd.context != NULL) {
2752                 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2753                         context = &priv->cmd.context[i];
2754                         context->fw_status = CMD_STAT_INTERNAL_ERR;
2755                         context->result =
2756                                 mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2757                         complete(&context->done);
2758                 }
2759         }
2760         spin_unlock(&priv->cmd.context_lock);
2761 }
2762
2763 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2764 {
2765         struct mlx4_active_ports actv_ports;
2766         int vf;
2767
2768         bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2769
2770         if (slave == 0) {
2771                 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2772                 return actv_ports;
2773         }
2774
2775         vf = mlx4_get_vf_indx(dev, slave);
2776         if (vf < 0)
2777                 return actv_ports;
2778
2779         bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2780                    min((int)dev->dev_vfs[vf].n_ports,
2781                    dev->caps.num_ports));
2782
2783         return actv_ports;
2784 }
2785 EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2786
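/*
 * Convert a slave-relative port number into a physical port number:
 * values outside 1..m (m = number of ports active for the slave) are
 * rejected, and values at or below the slave's first active port are
 * bumped up to that port.
 */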
2787 int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2788 {
2789         unsigned n;
2790         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2791         unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2792
2793         if (port <= 0 || port > m)
2794                 return -EINVAL;
2795
2796         n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2797         if (port <= n)
2798                 port = n + 1;
2799
2800         return port;
2801 }
2802 EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2803
2804 int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2805 {
2806         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2807         if (test_bit(port - 1, actv_ports.ports))
2808                 return port -
2809                         find_first_bit(actv_ports.ports, dev->caps.num_ports);
2810
2811         return -1;
2812 }
2813 EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2814
2815 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2816                                                    int port)
2817 {
2818         unsigned i;
2819         struct mlx4_slaves_pport slaves_pport;
2820
2821         bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2822
2823         if (port <= 0 || port > dev->caps.num_ports)
2824                 return slaves_pport;
2825
2826         for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2827                 struct mlx4_active_ports actv_ports =
2828                         mlx4_get_active_ports(dev, i);
2829                 if (test_bit(port - 1, actv_ports.ports))
2830                         set_bit(i, slaves_pport.slaves);
2831         }
2832
2833         return slaves_pport;
2834 }
2835 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2836
2837 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2838                 struct mlx4_dev *dev,
2839                 const struct mlx4_active_ports *crit_ports)
2840 {
2841         unsigned i;
2842         struct mlx4_slaves_pport slaves_pport;
2843
2844         bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2845
2846         for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2847                 struct mlx4_active_ports actv_ports =
2848                         mlx4_get_active_ports(dev, i);
2849                 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2850                                  dev->caps.num_ports))
2851                         set_bit(i, slaves_pport.slaves);
2852         }
2853
2854         return slaves_pport;
2855 }
2856 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2857
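/*
 * Clamp the requested port into the range of ports that are active for
 * this slave.
 */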
2858 static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2859 {
2860         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2861         int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2862                         + 1;
2863         int max_port = min_port +
2864                 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2865
2866         if (port < min_port)
2867                 port = min_port;
2868         else if (port >= max_port)
2869                 port = max_port - 1;
2870
2871         return port;
2872 }
2873
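/*
 * Program per-priority rate limiting for a slave's vport: the default QoS
 * values are first queried from Vport 0, then max_avg_bw is set and
 * enabled only for the priorities present in the port's priority bitmap
 * (and only when a non-zero max_tx_rate was requested).
 */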
2874 static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2875                               int max_tx_rate)
2876 {
2877         int i;
2878         int err;
2879         struct mlx4_qos_manager *port_qos;
2880         struct mlx4_dev *dev = &priv->dev;
2881         struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2882
2883         port_qos = &priv->mfunc.master.qos_ctl[port];
2884         memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2885
2886         if (slave > port_qos->num_of_qos_vfs) {
2887                 mlx4_info(dev, "No available VPP resources for this VF\n");
2888                 return -EINVAL;
2889         }
2890
2891         /* The default QoS values must be queried from Vport 0 */
2892         err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2893         if (err) {
2894                 mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2895                 return err;
2896         }
2897
2898         for (i = 0; i < MLX4_NUM_UP; i++) {
2899                 if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2900                         vpp_qos[i].max_avg_bw = max_tx_rate;
2901                         vpp_qos[i].enable = 1;
2902                 } else {
2903                         /* A user-supplied tx_rate of 0 means no rate limit
2904                          * configuration is required, so we leave max_avg_bw
2905                          * at the value queried from Vport 0.
2906                          */
2907                         vpp_qos[i].enable = 0;
2908                 }
2909         }
2910
2911         err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2912         if (err) {
2913                 mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2914                 return err;
2915         }
2916
2917         return 0;
2918 }
2919
2920 static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2921                                         struct mlx4_vport_state *vf_admin)
2922 {
2923         struct mlx4_qos_manager *info;
2924         struct mlx4_priv *priv = mlx4_priv(dev);
2925
2926         if (!mlx4_is_master(dev) ||
2927             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2928                 return false;
2929
2930         info = &priv->mfunc.master.qos_ctl[port];
2931
2932         if (vf_admin->default_vlan != MLX4_VGT &&
2933             test_bit(vf_admin->default_qos, info->priority_bm))
2934                 return true;
2935
2936         return false;
2937 }
2938
2939 static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2940                                        struct mlx4_vport_state *vf_admin,
2941                                        int vlan, int qos)
2942 {
2943         struct mlx4_vport_state dummy_admin = {0};
2944
2945         if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2946             !vf_admin->tx_rate)
2947                 return true;
2948
2949         dummy_admin.default_qos = qos;
2950         dummy_admin.default_vlan = vlan;
2951
2952         /* The VF wants to move to another VST state that is valid with the
2953          * current rate limit: either a different default vlan in VST or
2954          * another supported QoS priority. Otherwise we do not allow the
2955          * change while a TX rate is still configured.
2956          */
2957         if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2958                 return true;
2959
2960         mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2961                   (vlan == MLX4_VGT) ? "VGT" : "VST");
2962
2963         if (vlan != MLX4_VGT)
2964                 mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2965
2966         mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2967
2968         return false;
2969 }
2970
2971 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2972 {
2973         struct mlx4_priv *priv = mlx4_priv(dev);
2974         struct mlx4_vport_state *s_info;
2975         int slave;
2976
2977         if (!mlx4_is_master(dev))
2978                 return -EPROTONOSUPPORT;
2979
2980         slave = mlx4_get_slave_indx(dev, vf);
2981         if (slave < 0)
2982                 return -EINVAL;
2983
2984         port = mlx4_slaves_closest_port(dev, slave, port);
2985         s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2986         s_info->mac = mac;
2987         mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
2988                   vf, port, (unsigned long long)s_info->mac);
2989         return 0;
2990 }
2991 EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2992
2993
2994 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
2995                      __be16 proto)
2996 {
2997         struct mlx4_priv *priv = mlx4_priv(dev);
2998         struct mlx4_vport_state *vf_admin;
2999         struct mlx4_slave_state *slave_state;
3000         struct mlx4_vport_oper_state *vf_oper;
3001         int slave;
3002
3003         if ((!mlx4_is_master(dev)) ||
3004             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
3005                 return -EPROTONOSUPPORT;
3006
3007         if ((vlan > 4095) || (qos > 7))
3008                 return -EINVAL;
3009
3010         if (proto == htons(ETH_P_8021AD) &&
3011             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
3012                 return -EPROTONOSUPPORT;
3013
3014         if (proto != htons(ETH_P_8021Q) &&
3015             proto != htons(ETH_P_8021AD))
3016                 return -EINVAL;
3017
3018         if ((proto == htons(ETH_P_8021AD)) &&
3019             ((vlan == 0) || (vlan == MLX4_VGT)))
3020                 return -EINVAL;
3021
3022         slave = mlx4_get_slave_indx(dev, vf);
3023         if (slave < 0)
3024                 return -EINVAL;
3025
3026         slave_state = &priv->mfunc.master.slave_state[slave];
3027         if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
3028             (!slave_state->vst_qinq_supported)) {
3029                 mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
3030                 return -EPROTONOSUPPORT;
3031         }
3032         port = mlx4_slaves_closest_port(dev, slave, port);
3033         vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3034         vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3035
3036         if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
3037                 return -EPERM;
3038
3039         if ((0 == vlan) && (0 == qos))
3040                 vf_admin->default_vlan = MLX4_VGT;
3041         else
3042                 vf_admin->default_vlan = vlan;
3043         vf_admin->default_qos = qos;
3044         vf_admin->vlan_proto = proto;
3045
3046         /* If a rate was configured prior to VST, we saved it in
3047          * vf_admin->tx_rate; now, if the priority is supported, we enforce QoS.
3048          */
3049         if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
3050             vf_admin->tx_rate)
3051                 vf_admin->qos_vport = slave;
3052
3053         /* Try to activate the new VF state without a restart; this is
3054          * not supported while moving to VST QinQ mode.
3055          */
3056         if ((proto == htons(ETH_P_8021AD) &&
3057              vf_oper->state.vlan_proto != proto) ||
3058             mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3059                 mlx4_info(dev,
3060                           "updating vf %d port %d config will take effect on next VF restart\n",
3061                           vf, port);
3062         return 0;
3063 }
3064 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
3065
3066 int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
3067                      int max_tx_rate)
3068 {
3069         int err;
3070         int slave;
3071         struct mlx4_vport_state *vf_admin;
3072         struct mlx4_priv *priv = mlx4_priv(dev);
3073
3074         if (!mlx4_is_master(dev) ||
3075             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
3076                 return -EPROTONOSUPPORT;
3077
3078         if (min_tx_rate) {
3079                 mlx4_info(dev, "Minimum BW share not supported\n");
3080                 return -EPROTONOSUPPORT;
3081         }
3082
3083         slave = mlx4_get_slave_indx(dev, vf);
3084         if (slave < 0)
3085                 return -EINVAL;
3086
3087         port = mlx4_slaves_closest_port(dev, slave, port);
3088         vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3089
3090         err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
3091         if (err) {
3092                 mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
3093                           max_tx_rate);
3094                 return err;
3095         }
3096
3097         vf_admin->tx_rate = max_tx_rate;
3098         /* If the VF is not in a supported mode (VST with a supported prio),
3099          * we do not change the vport configuration for its QPs, but we save
3100          * the rate so it can be enforced the next time the VF moves to a
3101          * supported mode.
3102          */
3103         if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3104                 mlx4_info(dev,
3105                           "rate set for VF %d when not in valid state\n", vf);
3106
3107                 if (vf_admin->default_vlan != MLX4_VGT)
3108                         mlx4_info(dev, "VST priority not supported by QoS\n");
3109                 else
3110                         mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3111
3112                 mlx4_info(dev,
3113                           "rate %d takes effect when VF moves to valid state\n",
3114                           max_tx_rate);
3115                 return 0;
3116         }
3117
3118         /* If the user sets rate 0, assign the default vport to its QPs */
3119         vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3120
3121         if (priv->mfunc.master.slave_state[slave].active &&
3122             dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3123                 mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3124
3125         return 0;
3126 }
3127 EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3128
3129 /* mlx4_get_slave_default_vlan -
3130  * Return true if the slave is in VST mode (i.e. has a default vlan).
3131  * If so, also return the vlan and qos values through the non-NULL pointers.
3132  */
3133 bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3134                                  u16 *vlan, u8 *qos)
3135 {
3136         struct mlx4_vport_oper_state *vp_oper;
3137         struct mlx4_priv *priv;
3138
3139         priv = mlx4_priv(dev);
3140         port = mlx4_slaves_closest_port(dev, slave, port);
3141         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3142
3143         if (MLX4_VGT != vp_oper->state.default_vlan) {
3144                 if (vlan)
3145                         *vlan = vp_oper->state.default_vlan;
3146                 if (qos)
3147                         *qos = vp_oper->state.default_qos;
3148                 return true;
3149         }
3150         return false;
3151 }
3152 EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3153
3154 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3155 {
3156         struct mlx4_priv *priv = mlx4_priv(dev);
3157         struct mlx4_vport_state *s_info;
3158         int slave;
3159
3160         if ((!mlx4_is_master(dev)) ||
3161             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3162                 return -EPROTONOSUPPORT;
3163
3164         slave = mlx4_get_slave_indx(dev, vf);
3165         if (slave < 0)
3166                 return -EINVAL;
3167
3168         port = mlx4_slaves_closest_port(dev, slave, port);
3169         s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3170         s_info->spoofchk = setting;
3171
3172         return 0;
3173 }
3174 EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3175
3176 int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3177                            struct mlx4_counter *counter_stats, int reset)
3178 {
3179         struct mlx4_cmd_mailbox *mailbox = NULL;
3180         struct mlx4_counter *tmp_counter;
3181         int err;
3182         u32 if_stat_in_mod;
3183
3184         if (!counter_stats)
3185                 return -EINVAL;
3186
3187         if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3188                 return 0;
3189
3190         mailbox = mlx4_alloc_cmd_mailbox(dev);
3191         if (IS_ERR(mailbox))
3192                 return PTR_ERR(mailbox);
3193
3194         memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3195         if_stat_in_mod = counter_index;
3196         if (reset)
3197                 if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3198         err = mlx4_cmd_box(dev, 0, mailbox->dma,
3199                            if_stat_in_mod, 0,
3200                            MLX4_CMD_QUERY_IF_STAT,
3201                            MLX4_CMD_TIME_CLASS_C,
3202                            MLX4_CMD_NATIVE);
3203         if (err) {
3204                 mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3205                          __func__, counter_index);
3206                 goto if_stat_out;
3207         }
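        /*
         * The firmware reports counters in big-endian; accumulate the frame
         * and byte counts into the caller's structure while preserving that
         * byte order.
         */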
3208         tmp_counter = (struct mlx4_counter *)mailbox->buf;
3209         counter_stats->counter_mode = tmp_counter->counter_mode;
3210         if (counter_stats->counter_mode == 0) {
3211                 counter_stats->rx_frames =
3212                         cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3213                                     be64_to_cpu(tmp_counter->rx_frames));
3214                 counter_stats->tx_frames =
3215                         cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3216                                     be64_to_cpu(tmp_counter->tx_frames));
3217                 counter_stats->rx_bytes =
3218                         cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3219                                     be64_to_cpu(tmp_counter->rx_bytes));
3220                 counter_stats->tx_bytes =
3221                         cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3222                                     be64_to_cpu(tmp_counter->tx_bytes));
3223         }
3224
3225 if_stat_out:
3226         mlx4_free_cmd_mailbox(dev, mailbox);
3227
3228         return err;
3229 }
3230 EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3231
3232 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3233 {
3234         struct mlx4_priv *priv = mlx4_priv(dev);
3235
3236         if (slave < 1 || slave >= dev->num_slaves ||
3237             port < 1 || port > MLX4_MAX_PORTS)
3238                 return 0;
3239
3240         return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3241                 MLX4_VF_SMI_ENABLED;
3242 }
3243 EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3244
3245 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3246 {
3247         struct mlx4_priv *priv = mlx4_priv(dev);
3248
3249         if (slave == mlx4_master_func_num(dev))
3250                 return 1;
3251
3252         if (slave < 1 || slave >= dev->num_slaves ||
3253             port < 1 || port > MLX4_MAX_PORTS)
3254                 return 0;
3255
3256         return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3257                 MLX4_VF_SMI_ENABLED;
3258 }
3259 EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3260
3261 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3262                                  int enabled)
3263 {
3264         struct mlx4_priv *priv = mlx4_priv(dev);
3265         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3266                         &priv->dev, slave);
3267         int min_port = find_first_bit(actv_ports.ports,
3268                                       priv->dev.caps.num_ports) + 1;
3269         int max_port = min_port - 1 +
3270                 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3271
3272         if (slave == mlx4_master_func_num(dev))
3273                 return 0;
3274
3275         if (slave < 1 || slave >= dev->num_slaves ||
3276             port < 1 || port > MLX4_MAX_PORTS ||
3277             enabled < 0 || enabled > 1)
3278                 return -EINVAL;
3279
3280         if (min_port == max_port && dev->caps.num_ports > 1) {
3281                 mlx4_info(dev, "SMI access disallowed for single-ported VFs\n");
3282                 return -EPROTONOSUPPORT;
3283         }
3284
3285         priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3286         return 0;
3287 }
3288 EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3289