]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/mlx5/mlx5_core/mlx5_cmd.c
Upgrade our copies of clang, llvm, lld, lldb, compiler-rt and libc++ to
[FreeBSD/FreeBSD.git] / sys / dev / mlx5 / mlx5_core / mlx5_cmd.c
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/pci.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/random.h>
35 #include <linux/io-mapping.h>
36 #include <linux/hardirq.h>
37 #include <linux/ktime.h>
38 #include <dev/mlx5/driver.h>
39
40 #include "mlx5_core.h"
41
42 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
43 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
44                               struct mlx5_cmd_msg *msg);
45 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
46
/*
 * Command interface revision implemented by this driver; checked against
 * the revision reported by firmware before the command queue is used.
 */
enum {
	CMD_IF_REV = 5,
};
50
/*
 * How command completions are detected: busy-polling the ownership bit,
 * or waiting for completion events.
 */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};
55
/*
 * Sizing of the cached command-message pools.
 *
 * NUM_LONG_LISTS / NUM_MED_LISTS: number of pre-allocated messages of each
 * size class kept for reuse.
 *
 * LONG_LIST_SIZE: 8 bytes per page pointer for 2GB worth of pages, plus a
 * 16-byte header and one data block — presumably sized for the largest
 * MANAGE_PAGES transfer; TODO confirm against the firmware page-request
 * limits.
 */
enum {
	NUM_LONG_LISTS    = 2,
	NUM_MED_LISTS     = 64,
	LONG_LIST_SIZE    = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE     = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
63
/*
 * Command delivery status codes as reported in the command entry.
 * NOTE(review): the values jump from 0x9 to 0x10 (no 0xa-0xf); this appears
 * to mirror the device interface definition — confirm against the PRM
 * before assuming contiguity.
 */
enum {
	MLX5_CMD_DELIVERY_STAT_OK                       = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR               = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR                  = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR          = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR        = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR         = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR                   = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR            = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR           = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR      = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
};
77
78 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
79                                            struct mlx5_cmd_msg *in,
80                                            int uin_size,
81                                            struct mlx5_cmd_msg *out,
82                                            void *uout, int uout_size,
83                                            mlx5_cmd_cbk_t cbk,
84                                            void *context, int page_queue)
85 {
86         gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
87         struct mlx5_cmd_work_ent *ent;
88
89         ent = kzalloc(sizeof(*ent), alloc_flags);
90         if (!ent)
91                 return ERR_PTR(-ENOMEM);
92
93         ent->in         = in;
94         ent->uin_size   = uin_size;
95         ent->out        = out;
96         ent->uout       = uout;
97         ent->uout_size  = uout_size;
98         ent->callback   = cbk;
99         ent->context    = context;
100         ent->cmd        = cmd;
101         ent->page_queue = page_queue;
102
103         return ent;
104 }
105
106 static u8 alloc_token(struct mlx5_cmd *cmd)
107 {
108         u8 token;
109
110         spin_lock(&cmd->token_lock);
111         cmd->token++;
112         if (cmd->token == 0)
113                 cmd->token++;
114         token = cmd->token;
115         spin_unlock(&cmd->token_lock);
116
117         return token;
118 }
119
120 static int alloc_ent(struct mlx5_cmd_work_ent *ent)
121 {
122         unsigned long flags;
123         struct mlx5_cmd *cmd = ent->cmd;
124         struct mlx5_core_dev *dev =
125                 container_of(cmd, struct mlx5_core_dev, cmd);
126         int ret = cmd->max_reg_cmds;
127
128         spin_lock_irqsave(&cmd->alloc_lock, flags);
129         if (!ent->page_queue) {
130                 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
131                 if (ret >= cmd->max_reg_cmds)
132                         ret = -1;
133         }
134
135         if (dev->state != MLX5_DEVICE_STATE_UP)
136                 ret = -1;
137
138         if (ret != -1) {
139                 ent->busy = 1;
140                 ent->idx = ret;
141                 clear_bit(ent->idx, &cmd->bitmask);
142                 cmd->ent_arr[ent->idx] = ent;
143         }
144         spin_unlock_irqrestore(&cmd->alloc_lock, flags);
145
146         return ret;
147 }
148
149 static void free_ent(struct mlx5_cmd *cmd, int idx)
150 {
151         unsigned long flags;
152
153         spin_lock_irqsave(&cmd->alloc_lock, flags);
154         set_bit(idx, &cmd->bitmask);
155         spin_unlock_irqrestore(&cmd->alloc_lock, flags);
156 }
157
/*
 * Return a pointer to command layout slot @idx inside the command buffer;
 * each slot is (1 << log_stride) bytes.
 */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
162
163 static u8 xor8_buf(void *buf, int len)
164 {
165         u8 *ptr = buf;
166         u8 sum = 0;
167         int i;
168
169         for (i = 0; i < len; i++)
170                 sum ^= ptr[i];
171
172         return sum;
173 }
174
175 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
176 {
177         if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
178                 return -EINVAL;
179
180         if (xor8_buf(block, sizeof(*block)) != 0xff)
181                 return -EINVAL;
182
183         return 0;
184 }
185
186 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
187                            int csum)
188 {
189         block->token = token;
190         if (csum) {
191                 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
192                                             sizeof(block->data) - 2);
193                 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
194         }
195 }
196
197 static void
198 calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
199 {
200         size_t i;
201
202         for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
203                 struct mlx5_cmd_prot_block *block;
204
205                 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
206
207                 /* compute signature */
208                 calc_block_sig(block, token, csum);
209
210                 /* check for last block */
211                 if (block->next == 0)
212                         break;
213         }
214
215         /* make sure data gets written to RAM */
216         mlx5_fwp_flush(msg);
217 }
218
219 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
220 {
221         ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
222         calc_chain_sig(ent->in, ent->token, csum);
223         calc_chain_sig(ent->out, ent->token, csum);
224 }
225
226 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
227 {
228         struct mlx5_core_dev *dev = container_of(ent->cmd,
229                                                  struct mlx5_core_dev, cmd);
230         int poll_end = jiffies +
231                                 msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
232         u8 own;
233
234         do {
235                 own = ent->lay->status_own;
236                 if (!(own & CMD_OWNER_HW) ||
237                     dev->state != MLX5_DEVICE_STATE_UP) {
238                         ent->ret = 0;
239                         return;
240                 }
241                 usleep_range(5000, 10000);
242         } while (time_before(jiffies, poll_end));
243
244         ent->ret = -ETIMEDOUT;
245 }
246
/* Release a command work entry previously created by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
251
252 static int
253 verify_signature(struct mlx5_cmd_work_ent *ent)
254 {
255         struct mlx5_cmd_msg *msg = ent->out;
256         size_t i;
257         int err;
258         u8 sig;
259
260         sig = xor8_buf(ent->lay, sizeof(*ent->lay));
261         if (sig != 0xff)
262                 return -EINVAL;
263
264         for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
265                 struct mlx5_cmd_prot_block *block;
266
267                 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
268
269                 /* compute signature */
270                 err = verify_block_sig(block);
271                 if (err != 0)
272                         return (err);
273
274                 /* check for last block */
275                 if (block->next == 0)
276                         break;
277         }
278         return (0);
279 }
280
/*
 * Hex-dump @size bytes of @buf as rows of four big-endian 32-bit words,
 * each row prefixed with its running @offset.  When @data_only is clear a
 * trailing blank line is printed as a separator.
 *
 * NOTE(review): the loop advances 16 bytes per row regardless of @size, so
 * a size that is not a multiple of 16 reads up to 15 bytes past the end of
 * the region.  Callers appear to pass mailbox-sized buffers — confirm
 * every call site before relying on odd sizes.
 */
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
296
297 const char *mlx5_command_str(int command)
298 {
299         switch (command) {
300         case MLX5_CMD_OP_QUERY_HCA_CAP:
301                 return "QUERY_HCA_CAP";
302
303         case MLX5_CMD_OP_SET_HCA_CAP:
304                 return "SET_HCA_CAP";
305
306         case MLX5_CMD_OP_QUERY_ADAPTER:
307                 return "QUERY_ADAPTER";
308
309         case MLX5_CMD_OP_INIT_HCA:
310                 return "INIT_HCA";
311
312         case MLX5_CMD_OP_TEARDOWN_HCA:
313                 return "TEARDOWN_HCA";
314
315         case MLX5_CMD_OP_ENABLE_HCA:
316                 return "MLX5_CMD_OP_ENABLE_HCA";
317
318         case MLX5_CMD_OP_DISABLE_HCA:
319                 return "MLX5_CMD_OP_DISABLE_HCA";
320
321         case MLX5_CMD_OP_QUERY_PAGES:
322                 return "QUERY_PAGES";
323
324         case MLX5_CMD_OP_MANAGE_PAGES:
325                 return "MANAGE_PAGES";
326
327         case MLX5_CMD_OP_QUERY_ISSI:
328                 return "QUERY_ISSI";
329
330         case MLX5_CMD_OP_SET_ISSI:
331                 return "SET_ISSI";
332
333         case MLX5_CMD_OP_CREATE_MKEY:
334                 return "CREATE_MKEY";
335
336         case MLX5_CMD_OP_QUERY_MKEY:
337                 return "QUERY_MKEY";
338
339         case MLX5_CMD_OP_DESTROY_MKEY:
340                 return "DESTROY_MKEY";
341
342         case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
343                 return "QUERY_SPECIAL_CONTEXTS";
344
345         case MLX5_CMD_OP_PAGE_FAULT_RESUME:
346                 return "PAGE_FAULT_RESUME";
347
348         case MLX5_CMD_OP_CREATE_EQ:
349                 return "CREATE_EQ";
350
351         case MLX5_CMD_OP_DESTROY_EQ:
352                 return "DESTROY_EQ";
353
354         case MLX5_CMD_OP_QUERY_EQ:
355                 return "QUERY_EQ";
356
357         case MLX5_CMD_OP_GEN_EQE:
358                 return "GEN_EQE";
359
360         case MLX5_CMD_OP_CREATE_CQ:
361                 return "CREATE_CQ";
362
363         case MLX5_CMD_OP_DESTROY_CQ:
364                 return "DESTROY_CQ";
365
366         case MLX5_CMD_OP_QUERY_CQ:
367                 return "QUERY_CQ";
368
369         case MLX5_CMD_OP_MODIFY_CQ:
370                 return "MODIFY_CQ";
371
372         case MLX5_CMD_OP_CREATE_QP:
373                 return "CREATE_QP";
374
375         case MLX5_CMD_OP_DESTROY_QP:
376                 return "DESTROY_QP";
377
378         case MLX5_CMD_OP_RST2INIT_QP:
379                 return "RST2INIT_QP";
380
381         case MLX5_CMD_OP_INIT2RTR_QP:
382                 return "INIT2RTR_QP";
383
384         case MLX5_CMD_OP_RTR2RTS_QP:
385                 return "RTR2RTS_QP";
386
387         case MLX5_CMD_OP_RTS2RTS_QP:
388                 return "RTS2RTS_QP";
389
390         case MLX5_CMD_OP_SQERR2RTS_QP:
391                 return "SQERR2RTS_QP";
392
393         case MLX5_CMD_OP_2ERR_QP:
394                 return "2ERR_QP";
395
396         case MLX5_CMD_OP_2RST_QP:
397                 return "2RST_QP";
398
399         case MLX5_CMD_OP_QUERY_QP:
400                 return "QUERY_QP";
401
402         case MLX5_CMD_OP_SQD_RTS_QP:
403                 return "SQD_RTS_QP";
404
405         case MLX5_CMD_OP_MAD_IFC:
406                 return "MAD_IFC";
407
408         case MLX5_CMD_OP_INIT2INIT_QP:
409                 return "INIT2INIT_QP";
410
411         case MLX5_CMD_OP_CREATE_PSV:
412                 return "CREATE_PSV";
413
414         case MLX5_CMD_OP_DESTROY_PSV:
415                 return "DESTROY_PSV";
416
417         case MLX5_CMD_OP_CREATE_SRQ:
418                 return "CREATE_SRQ";
419
420         case MLX5_CMD_OP_DESTROY_SRQ:
421                 return "DESTROY_SRQ";
422
423         case MLX5_CMD_OP_QUERY_SRQ:
424                 return "QUERY_SRQ";
425
426         case MLX5_CMD_OP_ARM_RQ:
427                 return "ARM_RQ";
428
429         case MLX5_CMD_OP_CREATE_XRC_SRQ:
430                 return "CREATE_XRC_SRQ";
431
432         case MLX5_CMD_OP_DESTROY_XRC_SRQ:
433                 return "DESTROY_XRC_SRQ";
434
435         case MLX5_CMD_OP_QUERY_XRC_SRQ:
436                 return "QUERY_XRC_SRQ";
437
438         case MLX5_CMD_OP_ARM_XRC_SRQ:
439                 return "ARM_XRC_SRQ";
440
441         case MLX5_CMD_OP_CREATE_DCT:
442                 return "CREATE_DCT";
443
444         case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
445                 return "SET_DC_CNAK_TRACE";
446
447         case MLX5_CMD_OP_DESTROY_DCT:
448                 return "DESTROY_DCT";
449
450         case MLX5_CMD_OP_DRAIN_DCT:
451                 return "DRAIN_DCT";
452
453         case MLX5_CMD_OP_QUERY_DCT:
454                 return "QUERY_DCT";
455
456         case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
457                 return "ARM_DCT_FOR_KEY_VIOLATION";
458
459         case MLX5_CMD_OP_QUERY_VPORT_STATE:
460                 return "QUERY_VPORT_STATE";
461
462         case MLX5_CMD_OP_MODIFY_VPORT_STATE:
463                 return "MODIFY_VPORT_STATE";
464
465         case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
466                 return "QUERY_ESW_VPORT_CONTEXT";
467
468         case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
469                 return "MODIFY_ESW_VPORT_CONTEXT";
470
471         case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
472                 return "QUERY_NIC_VPORT_CONTEXT";
473
474         case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
475                 return "MODIFY_NIC_VPORT_CONTEXT";
476
477         case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
478                 return "QUERY_ROCE_ADDRESS";
479
480         case MLX5_CMD_OP_SET_ROCE_ADDRESS:
481                 return "SET_ROCE_ADDRESS";
482
483         case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
484                 return "QUERY_HCA_VPORT_CONTEXT";
485
486         case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
487                 return "MODIFY_HCA_VPORT_CONTEXT";
488
489         case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
490                 return "QUERY_HCA_VPORT_GID";
491
492         case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
493                 return "QUERY_HCA_VPORT_PKEY";
494
495         case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
496                 return "QUERY_VPORT_COUNTER";
497
498         case MLX5_CMD_OP_SET_WOL_ROL:
499                 return "SET_WOL_ROL";
500
501         case MLX5_CMD_OP_QUERY_WOL_ROL:
502                 return "QUERY_WOL_ROL";
503
504         case MLX5_CMD_OP_ALLOC_Q_COUNTER:
505                 return "ALLOC_Q_COUNTER";
506
507         case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
508                 return "DEALLOC_Q_COUNTER";
509
510         case MLX5_CMD_OP_QUERY_Q_COUNTER:
511                 return "QUERY_Q_COUNTER";
512
513         case MLX5_CMD_OP_ALLOC_PD:
514                 return "ALLOC_PD";
515
516         case MLX5_CMD_OP_DEALLOC_PD:
517                 return "DEALLOC_PD";
518
519         case MLX5_CMD_OP_ALLOC_UAR:
520                 return "ALLOC_UAR";
521
522         case MLX5_CMD_OP_DEALLOC_UAR:
523                 return "DEALLOC_UAR";
524
525         case MLX5_CMD_OP_CONFIG_INT_MODERATION:
526                 return "CONFIG_INT_MODERATION";
527
528         case MLX5_CMD_OP_ATTACH_TO_MCG:
529                 return "ATTACH_TO_MCG";
530
531         case MLX5_CMD_OP_DETACH_FROM_MCG:
532                 return "DETACH_FROM_MCG";
533
534         case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
535                 return "GET_DROPPED_PACKET_LOG";
536
537         case MLX5_CMD_OP_QUERY_MAD_DEMUX:
538                 return "QUERY_MAD_DEMUX";
539
540         case MLX5_CMD_OP_SET_MAD_DEMUX:
541                 return "SET_MAD_DEMUX";
542
543         case MLX5_CMD_OP_NOP:
544                 return "NOP";
545
546         case MLX5_CMD_OP_ALLOC_XRCD:
547                 return "ALLOC_XRCD";
548
549         case MLX5_CMD_OP_DEALLOC_XRCD:
550                 return "DEALLOC_XRCD";
551
552         case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
553                 return "ALLOC_TRANSPORT_DOMAIN";
554
555         case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
556                 return "DEALLOC_TRANSPORT_DOMAIN";
557
558         case MLX5_CMD_OP_QUERY_CONG_STATUS:
559                 return "QUERY_CONG_STATUS";
560
561         case MLX5_CMD_OP_MODIFY_CONG_STATUS:
562                 return "MODIFY_CONG_STATUS";
563
564         case MLX5_CMD_OP_QUERY_CONG_PARAMS:
565                 return "QUERY_CONG_PARAMS";
566
567         case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
568                 return "MODIFY_CONG_PARAMS";
569
570         case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
571                 return "QUERY_CONG_STATISTICS";
572
573         case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
574                 return "ADD_VXLAN_UDP_DPORT";
575
576         case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
577                 return "DELETE_VXLAN_UDP_DPORT";
578
579         case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
580                 return "SET_L2_TABLE_ENTRY";
581
582         case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
583                 return "QUERY_L2_TABLE_ENTRY";
584
585         case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
586                 return "DELETE_L2_TABLE_ENTRY";
587
588         case MLX5_CMD_OP_CREATE_RMP:
589                 return "CREATE_RMP";
590
591         case MLX5_CMD_OP_MODIFY_RMP:
592                 return "MODIFY_RMP";
593
594         case MLX5_CMD_OP_DESTROY_RMP:
595                 return "DESTROY_RMP";
596
597         case MLX5_CMD_OP_QUERY_RMP:
598                 return "QUERY_RMP";
599
600         case MLX5_CMD_OP_CREATE_RQT:
601                 return "CREATE_RQT";
602
603         case MLX5_CMD_OP_MODIFY_RQT:
604                 return "MODIFY_RQT";
605
606         case MLX5_CMD_OP_DESTROY_RQT:
607                 return "DESTROY_RQT";
608
609         case MLX5_CMD_OP_QUERY_RQT:
610                 return "QUERY_RQT";
611
612         case MLX5_CMD_OP_ACCESS_REG:
613                 return "MLX5_CMD_OP_ACCESS_REG";
614
615         case MLX5_CMD_OP_CREATE_SQ:
616                 return "CREATE_SQ";
617
618         case MLX5_CMD_OP_MODIFY_SQ:
619                 return "MODIFY_SQ";
620
621         case MLX5_CMD_OP_DESTROY_SQ:
622                 return "DESTROY_SQ";
623
624         case MLX5_CMD_OP_QUERY_SQ:
625                 return "QUERY_SQ";
626
627         case MLX5_CMD_OP_CREATE_RQ:
628                 return "CREATE_RQ";
629
630         case MLX5_CMD_OP_MODIFY_RQ:
631                 return "MODIFY_RQ";
632
633         case MLX5_CMD_OP_DESTROY_RQ:
634                 return "DESTROY_RQ";
635
636         case MLX5_CMD_OP_QUERY_RQ:
637                 return "QUERY_RQ";
638
639         case MLX5_CMD_OP_CREATE_TIR:
640                 return "CREATE_TIR";
641
642         case MLX5_CMD_OP_MODIFY_TIR:
643                 return "MODIFY_TIR";
644
645         case MLX5_CMD_OP_DESTROY_TIR:
646                 return "DESTROY_TIR";
647
648         case MLX5_CMD_OP_QUERY_TIR:
649                 return "QUERY_TIR";
650
651         case MLX5_CMD_OP_CREATE_TIS:
652                 return "CREATE_TIS";
653
654         case MLX5_CMD_OP_MODIFY_TIS:
655                 return "MODIFY_TIS";
656
657         case MLX5_CMD_OP_DESTROY_TIS:
658                 return "DESTROY_TIS";
659
660         case MLX5_CMD_OP_QUERY_TIS:
661                 return "QUERY_TIS";
662
663         case MLX5_CMD_OP_CREATE_FLOW_TABLE:
664                 return "CREATE_FLOW_TABLE";
665
666         case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
667                 return "DESTROY_FLOW_TABLE";
668
669         case MLX5_CMD_OP_QUERY_FLOW_TABLE:
670                 return "QUERY_FLOW_TABLE";
671
672         case MLX5_CMD_OP_CREATE_FLOW_GROUP:
673                 return "CREATE_FLOW_GROUP";
674
675         case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
676                 return "DESTROY_FLOW_GROUP";
677
678         case MLX5_CMD_OP_QUERY_FLOW_GROUP:
679                 return "QUERY_FLOW_GROUP";
680
681         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
682                 return "SET_FLOW_TABLE_ENTRY";
683
684         case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
685                 return "QUERY_FLOW_TABLE_ENTRY";
686
687         case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
688                 return "DELETE_FLOW_TABLE_ENTRY";
689
690         case MLX5_CMD_OP_SET_DIAGNOSTICS:
691                 return "MLX5_CMD_OP_SET_DIAGNOSTICS";
692
693         case MLX5_CMD_OP_QUERY_DIAGNOSTICS:
694                 return "MLX5_CMD_OP_QUERY_DIAGNOSTICS";
695
696         default: return "unknown command opcode";
697         }
698 }
699
/*
 * Debug-dump a command's input or output message.
 *
 * @input selects which side to dump (ent->in/uin_size vs ent->out/
 * uout_size).  In data-only mode (MLX5_CMD_DATA bit set in
 * mlx5_core_debug_mask) only the payload bytes are printed, bounded by the
 * caller-supplied message length; otherwise the full command layout and
 * every mailbox block are dumped including control fields.  @offset tracks
 * the running byte position printed by dump_buf() row prefixes.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	/* opcode lives in the inline input area of the command layout */
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	size_t i;
	int data_only;
	int offset = 0;
	int msg_len = input ? ent->uin_size : ent->uout_size;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	/* first the inline part of the layout (or the whole layout) */
	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	/* then each mailbox block of the message chain */
	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		if (data_only) {
			if (offset >= msg_len)
				break;
			dump_len = min_t(int,
			    MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);

			dump_buf(block->data, dump_len, 1, offset);
			/* offset advances by the full block even if truncated */
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(block, sizeof(*block), 0, offset);
			offset += sizeof(*block);
		}

		/* check for last block */
		if (block->next == 0)
			break;
	}

	if (data_only)
		pr_debug("\n");
}
763
/*
 * Synthesize a completion outbox for a command that cannot be delivered
 * because the device is in internal-error state.
 *
 * Teardown-style commands (destroy/dealloc/delete/modify and similar) are
 * reported as successful (status/syndrome zeroed, return 0) so software
 * cleanup can proceed even though firmware never saw them.  Commands whose
 * results the caller actually consumes (query/create/alloc/...) are failed
 * with MLX5_CMD_STAT_INT_ERR and -ECANCELED.  Unknown opcodes are logged
 * and rejected with -EINVAL.
 */
static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
				   struct mlx5_outbox_hdr *hdr)
{
	hdr->status = 0;
	hdr->syndrome = 0;

	switch (opcode) {
	/* cleanup-style commands: pretend success so teardown can continue */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_DRAIN_DCT:
		return 0;

	/* commands whose results matter: report internal error */
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
		hdr->status = MLX5_CMD_STAT_INT_ERR;
		hdr->syndrome = 0xFFFFFFFF;
		return -ECANCELED;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
		return -EINVAL;
	}
}
930
/*
 * Finalize a command work entry: fabricate an internal-error outbox if
 * the device is no longer up, record per-opcode statistics, deliver the
 * result to the caller (async callback or sync completion), and release
 * the queue-slot semaphore taken in cmd_work_handler().
 */
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
						 cmd);
	mlx5_cmd_cbk_t callback;
	void *context;

	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	int err;
	struct semaphore *sem;

	/* Page-queue commands hold the dedicated single-slot semaphore. */
	if (ent->page_queue)
		sem = &cmd->pages_sem;
	else
		sem = &cmd->sem;

	/*
	 * If the device went down, overwrite the output with a
	 * deterministic "internal error" status instead of returning
	 * stale or missing firmware output.
	 */
	if (dev->state != MLX5_DEVICE_STATE_UP) {
		/* NOTE(review): ent->out is cast directly to an outbox
		 * header — presumably the message's inline data is its
		 * first member; confirm against struct mlx5_cmd_msg. */
		struct mlx5_outbox_hdr *out_hdr =
			(struct mlx5_outbox_hdr *)ent->out;
		struct mlx5_inbox_hdr *in_hdr =
			(struct mlx5_inbox_hdr *)(ent->in->first.data);
		u16 opcode = be16_to_cpu(in_hdr->opcode);

		ent->ret = set_internal_err_outbox(dev,
						   opcode,
						   out_hdr);
	}

	if (ent->callback) {
		/* Asynchronous path: account time, copy output to the
		 * user buffer, then free everything before the callback. */
		ds = ent->ts2 - ent->ts1;
		if (ent->op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[ent->op];
			spin_lock_irqsave(&stats->lock, flags);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irqrestore(&stats->lock, flags);
		}

		callback = ent->callback;
		context = ent->context;
		err = ent->ret;
		if (!err)
			err = mlx5_copy_from_msg(ent->uout,
						 ent->out,
						 ent->uout_size);

		mlx5_free_cmd_msg(dev, ent->out);
		free_msg(dev, ent->in);

		/* "ent" must not be referenced after this point. */
		free_cmd(ent);
		callback(err, context);
	} else {
		/* Synchronous path: wake the waiter in wait_func(). */
		complete(&ent->done);
	}
	up(sem);
}
990
/*
 * Workqueue handler that submits one command to the HCA: acquires a
 * queue slot, fills in the command layout descriptor, and rings the
 * doorbell.  In polling mode the completion is also polled for and
 * dispatched inline.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	/* NOTE(review): returning here abandons "ent" without completing
	 * it, so a synchronous caller would block until timeout — confirm
	 * this path is only reachable during the polling transition where
	 * that is acceptable. */
	if (cmd->moving_to_polling) {
		mlx5_core_warn(dev, "not expecting command execution, ignoring...\n");
		return;
	}

	down(sem);

	/* No free command slot: fail the command immediately. */
	if (alloc_ent(ent) < 0) {
		complete_command(ent);
		return;
	}

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	/* The 16-bit opcode occupies the top half of the first dword. */
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	/* Hand descriptor ownership to the hardware. */
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	ent->busy = 0;
	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point*/
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx);
	}
}
1044
1045 static const char *deliv_status_to_str(u8 status)
1046 {
1047         switch (status) {
1048         case MLX5_CMD_DELIVERY_STAT_OK:
1049                 return "no errors";
1050         case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
1051                 return "signature error";
1052         case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
1053                 return "token error";
1054         case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
1055                 return "bad block number";
1056         case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
1057                 return "output pointer not aligned to block size";
1058         case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
1059                 return "input pointer not aligned to block size";
1060         case MLX5_CMD_DELIVERY_STAT_FW_ERR:
1061                 return "firmware internal error";
1062         case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
1063                 return "command input length error";
1064         case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
1065                 return "command ouput length error";
1066         case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
1067                 return "reserved fields not cleared";
1068         case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
1069                 return "bad command descriptor type";
1070         default:
1071                 return "unknown status code";
1072         }
1073 }
1074
1075 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
1076 {
1077         struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
1078
1079         return be16_to_cpu(hdr->opcode);
1080 }
1081
1082 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
1083 {
1084         int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
1085         struct mlx5_cmd *cmd = &dev->cmd;
1086         int err;
1087
1088         if (cmd->mode == CMD_MODE_POLLING) {
1089                 wait_for_completion(&ent->done);
1090                 err = ent->ret;
1091         } else {
1092                 if (!wait_for_completion_timeout(&ent->done, timeout))
1093                         err = -ETIMEDOUT;
1094                 else
1095                         err = 0;
1096         }
1097
1098         if (err == -ETIMEDOUT) {
1099                 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
1100                                mlx5_command_str(msg_to_opcode(ent->in)),
1101                                msg_to_opcode(ent->in));
1102         }
1103         mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
1104                       err, deliv_status_to_str(ent->status), ent->status);
1105
1106         return err;
1107 }
1108
/*  Notes:
 *    1. Callback functions may not sleep
 *    2. page queue commands do not support asynchronous completion
 */
/*
 * Queue a command for execution and, for synchronous calls, wait for
 * its completion.  On success "*status" holds the firmware delivery
 * status.  On -ETIMEDOUT the entry is deliberately NOT freed because
 * the hardware may still complete it and DMA into its buffers later.
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	/* Page-queue commands must be synchronous (note 2 above). */
	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* Page-queue commands run inline rather than via the
		 * workqueue — presumably to avoid queueing behind other
		 * command work; TODO confirm rationale. */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;	/* entry leaked on purpose, see above */

		/* Record execution time statistics for this opcode. */
		ds = ent->ts2 - ent->ts1;
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), (long long)ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}
1174
1175 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
1176 {
1177         size_t delta;
1178         size_t i;
1179
1180         if (to == NULL || from == NULL)
1181                 return (-ENOMEM);
1182
1183         delta = min_t(size_t, size, sizeof(to->first.data));
1184         memcpy(to->first.data, from, delta);
1185         from = (char *)from + delta;
1186         size -= delta;
1187
1188         for (i = 0; size != 0; i++) {
1189                 struct mlx5_cmd_prot_block *block;
1190
1191                 block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);
1192
1193                 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
1194                 memcpy(block->data, from, delta);
1195                 from = (char *)from + delta;
1196                 size -= delta;
1197         }
1198         return (0);
1199 }
1200
1201 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
1202 {
1203         size_t delta;
1204         size_t i;
1205
1206         if (to == NULL || from == NULL)
1207                 return (-ENOMEM);
1208
1209         delta = min_t(size_t, size, sizeof(from->first.data));
1210         memcpy(to, from->first.data, delta);
1211         to = (char *)to + delta;
1212         size -= delta;
1213
1214         for (i = 0; size != 0; i++) {
1215                 struct mlx5_cmd_prot_block *block;
1216
1217                 block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);
1218
1219                 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
1220                 memcpy(to, block->data, delta);
1221                 to = (char *)to + delta;
1222                 size -= delta;
1223         }
1224         return (0);
1225 }
1226
/*
 * Allocate a command message able to carry "size" bytes.  The first
 * sizeof(msg->first.data) bytes live inline; the remainder is split
 * across DMA-able mailbox blocks chained through their "next" pointers
 * and numbered sequentially.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct mlx5_cmd_msg *
mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
{
	struct mlx5_cmd_msg *msg;
	size_t blen;	/* bytes that do not fit in the inline area */
	size_t n;	/* number of mailbox blocks required */
	size_t i;

	blen = size - min_t(size_t, sizeof(msg->first.data), size);
	n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);

	msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
	if (msg == NULL)
		return (ERR_PTR(-ENOMEM));

	for (i = 0; i != n; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		memset(block, 0, MLX5_CMD_MBOX_SIZE);

		/* Chain each block to its successor, except the last. */
		if (i != (n - 1)) {
			u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
			block->next = cpu_to_be64(dma);
		}
		block->block_num = cpu_to_be32(i);
	}

	/* make sure initial data is written to RAM */
	mlx5_fwp_flush(msg);

	return (msg);
}
1261
/* Release a message previously obtained from mlx5_alloc_cmd_msg(). */
static void
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{

	mlx5_fwp_free(msg);
}
1268
/* Build the per-device command workqueue name from the PCI device name. */
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
1276
/* Intentionally empty: command debug files are not implemented here. */
static void clean_debug_files(struct mlx5_core_dev *dev)
{
}
1280
1281
/*
 * Switch command completion handling to event (EQE) mode.  Every
 * regular-command semaphore slot plus the page-queue slot is taken so
 * that no command is in flight while the mode flips; the workqueue is
 * flushed for the same reason.
 */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* Acquire all slots so no new command can start. */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	/* Release the slots, re-enabling command submission. */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
1300
/*
 * Switch command completion handling back to polling mode.  The
 * "moving_to_polling" flag makes cmd_work_handler() reject work items
 * that race with the transition; pending page-event interrupts and
 * work are flushed first.
 */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	synchronize_irq(dev->priv.eq_table.pages_eq.irqn);
	flush_workqueue(dev->priv.pg_wq);
	cmd->moving_to_polling = 1;
	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;
	cmd->moving_to_polling = 0;
}
1312
1313 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1314 {
1315         unsigned long flags;
1316
1317         if (msg->cache) {
1318                 spin_lock_irqsave(&msg->cache->lock, flags);
1319                 list_add_tail(&msg->list, &msg->cache->head);
1320                 spin_unlock_irqrestore(&msg->cache->lock, flags);
1321         } else {
1322                 mlx5_free_cmd_msg(dev, msg);
1323         }
1324 }
1325
/*
 * Dispatch command completions for every slot whose bit is set in
 * "vector".  Called from the EQ path in event mode, or directly after
 * polling in polling mode.  For each completed slot the output is
 * copied back, optionally signature-verified, and the entry finalized
 * via complete_command().
 */
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	int i;

	/* make sure data gets read from RAM */
	mlx5_fwp_invalidate(cmd->cmd_page);

	while (vector != 0) {
		i = ffs(vector) - 1;	/* lowest set bit = completed slot */
		vector &= ~(1U << i);
		ent = cmd->ent_arr[i];
		ent->ts2 = ktime_get_ns();
		memcpy(ent->out->first.data, ent->lay->out,
		       sizeof(ent->lay->out));
		/* make sure data gets read from RAM */
		mlx5_fwp_invalidate(ent->out);
		dump_command(dev, ent, 0);
		if (!ent->ret) {
			if (!cmd->checksum_disabled)
				ent->ret = verify_signature(ent);
			else
				ent->ret = 0;
			/* Delivery status sits above the ownership bit. */
			ent->status = ent->lay->status_own >> 1;

			mlx5_core_dbg(dev,
				      "FW command ret 0x%x, status %s(0x%x)\n",
				      ent->ret,
				      deliv_status_to_str(ent->status),
				      ent->status);
		}
		free_ent(cmd, ent->idx);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
1363
/*
 * Force completion of all commands currently in flight, e.g. when the
 * hardware has stopped responding.  The set of busy slots is derived
 * from the allocation bitmask and each is completed as if the firmware
 * had answered.
 */
void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
{
	unsigned long vector;
	int i = 0;
	unsigned long flags;
	synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	/* Inverted bitmask: set bits = allocated (in-flight) slots. */
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	if (!vector)
		return;

	for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
		struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];

		if (!test_bit(i, &vector))
			continue;

		/* Wait until the submitter stops touching this entry. */
		while (ent->busy)
			usleep_range(1000, 1100);
		free_ent(&dev->cmd, i);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_trigger_cmd_completions);
1390
1391 static int status_to_err(u8 status)
1392 {
1393         return status ? -1 : 0; /* TBD more meaningful codes */
1394 }
1395
1396 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1397                                       gfp_t gfp)
1398 {
1399         struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1400         struct mlx5_cmd *cmd = &dev->cmd;
1401         struct cache_ent *ent = NULL;
1402
1403         if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
1404                 ent = &cmd->cache.large;
1405         else if (in_size > 16 && in_size <= MED_LIST_SIZE)
1406                 ent = &cmd->cache.med;
1407
1408         if (ent) {
1409                 spin_lock_irq(&ent->lock);
1410                 if (!list_empty(&ent->head)) {
1411                         msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
1412                                          list);
1413                         list_del(&msg->list);
1414                 }
1415                 spin_unlock_irq(&ent->lock);
1416         }
1417
1418         if (IS_ERR(msg))
1419                 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
1420
1421         return msg;
1422 }
1423
/* True if the command is MANAGE_PAGES, which must use the page queue. */
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
1428
/*
 * Common implementation for mlx5_cmd_exec() and mlx5_cmd_exec_cb():
 * wraps the caller's raw in/out buffers in command messages, runs the
 * command, and (for synchronous calls) copies the output back.
 *
 * On -ETIMEDOUT from mlx5_cmd_invoke() the messages are deliberately
 * NOT freed, since a late firmware completion could still DMA into
 * them (see the leak warning in wait_func()).
 */
static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	const gfp_t gfp = GFP_KERNEL;
	int err;
	u8 status = 0;

	/* MANAGE_PAGES commands use the dedicated page queue slot. */
	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
			      context, pages_queue, &status);
	if (err) {
		if (err == -ETIMEDOUT)
			return err;	/* messages leak on purpose, see above */
		goto out_out;
	}

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	/* Async callers: complete_command() copies output and frees
	 * both messages, so skip the cleanup below. */
	if (callback)
		return err;

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}
1487
/* Execute a firmware command synchronously. */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
1494
/*
 * Execute a firmware command asynchronously; "callback" is invoked with
 * "context" when the command completes (callbacks may not sleep).
 */
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1502
1503 static void destroy_msg_cache(struct mlx5_core_dev *dev)
1504 {
1505         struct mlx5_cmd *cmd = &dev->cmd;
1506         struct mlx5_cmd_msg *msg;
1507         struct mlx5_cmd_msg *n;
1508
1509         list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
1510                 list_del(&msg->list);
1511                 mlx5_free_cmd_msg(dev, msg);
1512         }
1513
1514         list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
1515                 list_del(&msg->list);
1516                 mlx5_free_cmd_msg(dev, msg);
1517         }
1518 }
1519
/*
 * Pre-allocate command messages into the "large" and "med" caches so
 * that commonly sized commands avoid a DMA allocation per invocation.
 * On any allocation failure everything cached so far is released.
 */
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
1558
/*
 * Create the DMA resources backing the command queue: synchronization
 * primitives, a busdma tag for 4K firmware pages, and one firmware
 * page holding the command queue itself.  Returns 0 or a negative
 * errno; on failure all partially created resources are torn down.
 */
static int
alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	int err;

	sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
	mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
	cv_init(&cmd->dma_cv, "MLX5-DMA-CV");

	/*
	 * Create global DMA descriptor tag for allocating
	 * 4K firmware pages:
	 */
	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    MLX5_ADAPTER_PAGE_SIZE,	/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsize */
	    1,				/* nsegments */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &cmd->dma_tag);
	if (err != 0)
		goto failure_destroy_sx;

	cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (cmd->cmd_page == NULL) {
		err = -ENOMEM;
		goto failure_alloc_page;
	}
	/* Cache the queue's bus and virtual addresses. */
	cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
	cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
	return (0);

failure_alloc_page:
	bus_dma_tag_destroy(cmd->dma_tag);

failure_destroy_sx:
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
	return (err);
}
1606
/* Release everything created by alloc_cmd_page(), in reverse order. */
static void
free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{

	mlx5_fwp_free(cmd->cmd_page);
	bus_dma_tag_destroy(cmd->dma_tag);
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
}
1617
/*
 * Initialize the command interface: verify the command interface
 * revision, allocate the command queue page, read the queue geometry
 * from the initialization segment, program the queue address into the
 * device, and create the message cache and command workqueue.
 * Returns 0 or a negative errno.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	/* Low byte of cmdq_addr_l_sz encodes log queue size and stride. */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	/* One of the 1<<log_sz slots is reserved for the page queue
	 * (pages_sem below is initialized to 1). */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	/* The queue address must be 4KB aligned (low 12 bits clear). */
	if (cmd_l & 0xfff) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
1714
/* Tear down everything created by mlx5_cmd_init(), in reverse order. */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
1725
/* Map a firmware command status byte to a human-readable string. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
1765
/* Map a firmware command status byte to a negative errno value. */
static int cmd_status_to_err_helper(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
1788
1789 /* this will be available till all the commands use set/get macros */
1790 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1791 {
1792         if (!hdr->status)
1793                 return 0;
1794
1795         printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));
1796
1797         return cmd_status_to_err_helper(hdr->status);
1798 }
1799
1800 int mlx5_cmd_status_to_err_v2(void *ptr)
1801 {
1802         u32     syndrome;
1803         u8      status;
1804
1805         status = be32_to_cpu(*(__be32 *)ptr) >> 24;
1806         if (!status)
1807                 return 0;
1808
1809         syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
1810
1811         printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);
1812
1813         return cmd_status_to_err_helper(status);
1814 }
1815