2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/pci.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/random.h>
35 #include <linux/io-mapping.h>
36 #include <linux/hardirq.h>
37 #include <linux/ktime.h>
38 #include <dev/mlx5/driver.h>
40 #include "mlx5_core.h"
/* Forward declarations for helpers defined later in this file. */
42 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
43 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
44 struct mlx5_cmd_msg *msg);
45 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
/*
 * Size thresholds for the two command-message cache buckets used by
 * alloc_msg()/free_msg().  LONG_LIST_SIZE covers 2GB worth of 8-byte
 * page pointers plus a 16-byte inline header and one mailbox data block
 * (presumably sized for the largest MANAGE_PAGES request — confirm).
 */
59 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
60 MLX5_CMD_DATA_BLOCK_SIZE,
61 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
/*
 * Hardware command-delivery status codes, reported in the command layout
 * and translated to text by deliv_status_to_str().
 */
65 MLX5_CMD_DELIVERY_STAT_OK = 0x0,
66 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
67 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
68 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
69 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
70 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
71 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
72 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
73 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
74 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
75 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
/*
 * Allocate and initialize a command work entry describing one in-flight
 * firmware command (input/output messages, user buffers, completion
 * context).  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
78 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
79 struct mlx5_cmd_msg *in,
81 struct mlx5_cmd_msg *out,
82 void *uout, int uout_size,
84 void *context, int page_queue)
/* GFP_ATOMIC when a completion callback is supplied: the caller may be
 * in a context that must not sleep. */
86 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
87 struct mlx5_cmd_work_ent *ent;
89 ent = kzalloc(sizeof(*ent), alloc_flags);
91 return ERR_PTR(-ENOMEM);
94 ent->uin_size = uin_size;
97 ent->uout_size = uout_size;
99 ent->context = context;
101 ent->page_queue = page_queue;
/*
 * Hand out the next command token under token_lock; the token is echoed
 * back by firmware and used for signature/consistency checks.
 */
106 static u8 alloc_token(struct mlx5_cmd *cmd)
110 spin_lock(&cmd->token_lock);
115 spin_unlock(&cmd->token_lock);
/*
 * Reserve a command-queue slot for @ent under alloc_lock.  Regular
 * commands take the first free bit in cmd->bitmask; the dedicated page
 * queue slot (index max_reg_cmds) is used when ent->page_queue is set.
 * Fails when no slot is free or the device is not in the UP state.
 */
120 static int alloc_ent(struct mlx5_cmd_work_ent *ent)
123 struct mlx5_cmd *cmd = ent->cmd;
124 struct mlx5_core_dev *dev =
125 container_of(cmd, struct mlx5_core_dev, cmd);
126 int ret = cmd->max_reg_cmds;
128 spin_lock_irqsave(&cmd->alloc_lock, flags);
129 if (!ent->page_queue) {
130 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
131 if (ret >= cmd->max_reg_cmds)
135 if (dev->state != MLX5_DEVICE_STATE_UP)
/* a cleared bit marks the slot as busy */
141 clear_bit(ent->idx, &cmd->bitmask);
142 cmd->ent_arr[ent->idx] = ent;
144 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
149 static void free_ent(struct mlx5_cmd *cmd, int idx)
153 spin_lock_irqsave(&cmd->alloc_lock, flags);
154 set_bit(idx, &cmd->bitmask);
155 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
158 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
160 return cmd->cmd_buf + (idx << cmd->log_stride);
163 static u8 xor8_buf(void *buf, int len)
169 for (i = 0; i < len; i++)
175 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
177 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
180 if (xor8_buf(block, sizeof(*block)) != 0xff)
/*
 * Stamp @token into a mailbox block and compute its control and
 * whole-block signatures so that each region XOR-folds to 0xff
 * (verified later by verify_block_sig()).
 * NOTE(review): the @csum parameter presumably gates the signature
 * computation on a missing conditional line — confirm against the
 * unmangled source.
 */
186 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
189 block->token = token;
191 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
192 sizeof(block->data) - 2);
193 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
/*
 * Walk every mailbox block of @msg and compute its signature, stopping
 * after the last block of the chain (block->next == 0).
 */
198 calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
202 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
203 struct mlx5_cmd_prot_block *block;
205 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
207 /* compute signature */
208 calc_block_sig(block, token, csum);
210 /* check for last block */
211 if (block->next == 0)
215 /* make sure data gets written to RAM */
219 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
221 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
222 calc_chain_sig(ent->in, ent->token, csum);
223 calc_chain_sig(ent->out, ent->token, csum);
/*
 * Busy-wait (with 5-10ms sleeps) until firmware clears the HW ownership
 * bit of the command descriptor, the device leaves the UP state, or the
 * command timeout (plus 1s of slack) expires.  On timeout, marks the
 * entry with -ETIMEDOUT.
 */
226 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
228 struct mlx5_core_dev *dev = container_of(ent->cmd,
229 struct mlx5_core_dev, cmd);
230 int poll_end = jiffies +
231 msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
235 own = ent->lay->status_own;
236 if (!(own & CMD_OWNER_HW) ||
237 dev->state != MLX5_DEVICE_STATE_UP) {
241 usleep_range(5000, 10000);
242 } while (time_before(jiffies, poll_end));
244 ent->ret = -ETIMEDOUT;
/* Release a command work entry allocated by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
/*
 * Verify the signatures of a completed command: first the command
 * layout, then every mailbox block of the output chain up to the last
 * block (block->next == 0).  Returns 0 when all signatures check out.
 */
253 verify_signature(struct mlx5_cmd_work_ent *ent)
255 struct mlx5_cmd_msg *msg = ent->out;
260 sig = xor8_buf(ent->lay, sizeof(*ent->lay));
264 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
265 struct mlx5_cmd_prot_block *block;
267 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
269 /* verify signature */
270 err = verify_block_sig(block);
274 /* check for last block */
275 if (block->next == 0)
/*
 * Hex-dump @size bytes of @buf to the debug log, 16 bytes (four
 * big-endian 32-bit words) per line, labelling each line with the
 * running @offset.
 */
281 static void dump_buf(void *buf, int size, int data_only, int offset)
286 for (i = 0; i < size; i += 16) {
287 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
288 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
/*
 * Translate a firmware command opcode into a human-readable name for
 * logging.  Unknown opcodes map to "unknown command opcode".
 */
297 const char *mlx5_command_str(int command)
300 case MLX5_CMD_OP_QUERY_HCA_CAP:
301 return "QUERY_HCA_CAP";
303 case MLX5_CMD_OP_SET_HCA_CAP:
304 return "SET_HCA_CAP";
306 case MLX5_CMD_OP_QUERY_ADAPTER:
307 return "QUERY_ADAPTER";
309 case MLX5_CMD_OP_INIT_HCA:
312 case MLX5_CMD_OP_TEARDOWN_HCA:
313 return "TEARDOWN_HCA";
315 case MLX5_CMD_OP_ENABLE_HCA:
316 return "MLX5_CMD_OP_ENABLE_HCA";
318 case MLX5_CMD_OP_DISABLE_HCA:
319 return "MLX5_CMD_OP_DISABLE_HCA";
321 case MLX5_CMD_OP_QUERY_PAGES:
322 return "QUERY_PAGES";
324 case MLX5_CMD_OP_MANAGE_PAGES:
325 return "MANAGE_PAGES";
327 case MLX5_CMD_OP_QUERY_ISSI:
330 case MLX5_CMD_OP_SET_ISSI:
333 case MLX5_CMD_OP_CREATE_MKEY:
334 return "CREATE_MKEY";
336 case MLX5_CMD_OP_QUERY_MKEY:
339 case MLX5_CMD_OP_DESTROY_MKEY:
340 return "DESTROY_MKEY";
342 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
343 return "QUERY_SPECIAL_CONTEXTS";
345 case MLX5_CMD_OP_PAGE_FAULT_RESUME:
346 return "PAGE_FAULT_RESUME";
348 case MLX5_CMD_OP_CREATE_EQ:
351 case MLX5_CMD_OP_DESTROY_EQ:
354 case MLX5_CMD_OP_QUERY_EQ:
357 case MLX5_CMD_OP_GEN_EQE:
360 case MLX5_CMD_OP_CREATE_CQ:
363 case MLX5_CMD_OP_DESTROY_CQ:
366 case MLX5_CMD_OP_QUERY_CQ:
369 case MLX5_CMD_OP_MODIFY_CQ:
372 case MLX5_CMD_OP_CREATE_QP:
375 case MLX5_CMD_OP_DESTROY_QP:
378 case MLX5_CMD_OP_RST2INIT_QP:
379 return "RST2INIT_QP";
381 case MLX5_CMD_OP_INIT2RTR_QP:
382 return "INIT2RTR_QP";
384 case MLX5_CMD_OP_RTR2RTS_QP:
387 case MLX5_CMD_OP_RTS2RTS_QP:
390 case MLX5_CMD_OP_SQERR2RTS_QP:
391 return "SQERR2RTS_QP";
393 case MLX5_CMD_OP_2ERR_QP:
396 case MLX5_CMD_OP_2RST_QP:
399 case MLX5_CMD_OP_QUERY_QP:
402 case MLX5_CMD_OP_SQD_RTS_QP:
405 case MLX5_CMD_OP_MAD_IFC:
408 case MLX5_CMD_OP_INIT2INIT_QP:
409 return "INIT2INIT_QP";
411 case MLX5_CMD_OP_CREATE_PSV:
414 case MLX5_CMD_OP_DESTROY_PSV:
415 return "DESTROY_PSV";
417 case MLX5_CMD_OP_CREATE_SRQ:
420 case MLX5_CMD_OP_DESTROY_SRQ:
421 return "DESTROY_SRQ";
423 case MLX5_CMD_OP_QUERY_SRQ:
426 case MLX5_CMD_OP_ARM_RQ:
429 case MLX5_CMD_OP_CREATE_XRC_SRQ:
430 return "CREATE_XRC_SRQ";
432 case MLX5_CMD_OP_DESTROY_XRC_SRQ:
433 return "DESTROY_XRC_SRQ";
435 case MLX5_CMD_OP_QUERY_XRC_SRQ:
436 return "QUERY_XRC_SRQ";
438 case MLX5_CMD_OP_ARM_XRC_SRQ:
439 return "ARM_XRC_SRQ";
441 case MLX5_CMD_OP_CREATE_DCT:
444 case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
445 return "SET_DC_CNAK_TRACE";
447 case MLX5_CMD_OP_DESTROY_DCT:
448 return "DESTROY_DCT";
450 case MLX5_CMD_OP_DRAIN_DCT:
453 case MLX5_CMD_OP_QUERY_DCT:
456 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
457 return "ARM_DCT_FOR_KEY_VIOLATION";
459 case MLX5_CMD_OP_QUERY_VPORT_STATE:
460 return "QUERY_VPORT_STATE";
462 case MLX5_CMD_OP_MODIFY_VPORT_STATE:
463 return "MODIFY_VPORT_STATE";
465 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
466 return "QUERY_ESW_VPORT_CONTEXT";
468 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
469 return "MODIFY_ESW_VPORT_CONTEXT";
471 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
472 return "QUERY_NIC_VPORT_CONTEXT";
474 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
475 return "MODIFY_NIC_VPORT_CONTEXT";
477 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
478 return "QUERY_ROCE_ADDRESS";
480 case MLX5_CMD_OP_SET_ROCE_ADDRESS:
481 return "SET_ROCE_ADDRESS";
483 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
484 return "QUERY_HCA_VPORT_CONTEXT";
486 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
487 return "MODIFY_HCA_VPORT_CONTEXT";
489 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
490 return "QUERY_HCA_VPORT_GID";
492 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
493 return "QUERY_HCA_VPORT_PKEY";
495 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
496 return "QUERY_VPORT_COUNTER";
498 case MLX5_CMD_OP_SET_WOL_ROL:
499 return "SET_WOL_ROL";
501 case MLX5_CMD_OP_QUERY_WOL_ROL:
502 return "QUERY_WOL_ROL";
504 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
505 return "ALLOC_Q_COUNTER";
507 case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
508 return "DEALLOC_Q_COUNTER";
510 case MLX5_CMD_OP_QUERY_Q_COUNTER:
511 return "QUERY_Q_COUNTER";
513 case MLX5_CMD_OP_ALLOC_PD:
516 case MLX5_CMD_OP_DEALLOC_PD:
519 case MLX5_CMD_OP_ALLOC_UAR:
522 case MLX5_CMD_OP_DEALLOC_UAR:
523 return "DEALLOC_UAR";
525 case MLX5_CMD_OP_CONFIG_INT_MODERATION:
526 return "CONFIG_INT_MODERATION";
528 case MLX5_CMD_OP_ATTACH_TO_MCG:
529 return "ATTACH_TO_MCG";
531 case MLX5_CMD_OP_DETACH_FROM_MCG:
532 return "DETACH_FROM_MCG";
534 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
535 return "GET_DROPPED_PACKET_LOG";
537 case MLX5_CMD_OP_QUERY_MAD_DEMUX:
538 return "QUERY_MAD_DEMUX";
540 case MLX5_CMD_OP_SET_MAD_DEMUX:
541 return "SET_MAD_DEMUX";
543 case MLX5_CMD_OP_NOP:
546 case MLX5_CMD_OP_ALLOC_XRCD:
549 case MLX5_CMD_OP_DEALLOC_XRCD:
550 return "DEALLOC_XRCD";
552 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
553 return "ALLOC_TRANSPORT_DOMAIN";
555 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
556 return "DEALLOC_TRANSPORT_DOMAIN";
558 case MLX5_CMD_OP_QUERY_CONG_STATUS:
559 return "QUERY_CONG_STATUS";
561 case MLX5_CMD_OP_MODIFY_CONG_STATUS:
562 return "MODIFY_CONG_STATUS";
564 case MLX5_CMD_OP_QUERY_CONG_PARAMS:
565 return "QUERY_CONG_PARAMS";
567 case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
568 return "MODIFY_CONG_PARAMS";
570 case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
571 return "QUERY_CONG_STATISTICS";
573 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
574 return "ADD_VXLAN_UDP_DPORT";
576 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
577 return "DELETE_VXLAN_UDP_DPORT";
579 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
580 return "SET_L2_TABLE_ENTRY";
582 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
583 return "QUERY_L2_TABLE_ENTRY";
585 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
586 return "DELETE_L2_TABLE_ENTRY";
588 case MLX5_CMD_OP_CREATE_RMP:
591 case MLX5_CMD_OP_MODIFY_RMP:
594 case MLX5_CMD_OP_DESTROY_RMP:
595 return "DESTROY_RMP";
597 case MLX5_CMD_OP_QUERY_RMP:
600 case MLX5_CMD_OP_CREATE_RQT:
603 case MLX5_CMD_OP_MODIFY_RQT:
606 case MLX5_CMD_OP_DESTROY_RQT:
607 return "DESTROY_RQT";
609 case MLX5_CMD_OP_QUERY_RQT:
612 case MLX5_CMD_OP_ACCESS_REG:
613 return "MLX5_CMD_OP_ACCESS_REG";
615 case MLX5_CMD_OP_CREATE_SQ:
618 case MLX5_CMD_OP_MODIFY_SQ:
621 case MLX5_CMD_OP_DESTROY_SQ:
624 case MLX5_CMD_OP_QUERY_SQ:
627 case MLX5_CMD_OP_CREATE_RQ:
630 case MLX5_CMD_OP_MODIFY_RQ:
633 case MLX5_CMD_OP_DESTROY_RQ:
636 case MLX5_CMD_OP_QUERY_RQ:
639 case MLX5_CMD_OP_CREATE_TIR:
642 case MLX5_CMD_OP_MODIFY_TIR:
645 case MLX5_CMD_OP_DESTROY_TIR:
646 return "DESTROY_TIR";
648 case MLX5_CMD_OP_QUERY_TIR:
651 case MLX5_CMD_OP_CREATE_TIS:
654 case MLX5_CMD_OP_MODIFY_TIS:
657 case MLX5_CMD_OP_DESTROY_TIS:
658 return "DESTROY_TIS";
660 case MLX5_CMD_OP_QUERY_TIS:
663 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
664 return "CREATE_FLOW_TABLE";
666 case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
667 return "DESTROY_FLOW_TABLE";
669 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
670 return "QUERY_FLOW_TABLE";
672 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
673 return "CREATE_FLOW_GROUP";
675 case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
676 return "DESTROY_FLOW_GROUP";
678 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
679 return "QUERY_FLOW_GROUP";
681 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
682 return "SET_FLOW_TABLE_ENTRY";
684 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
685 return "QUERY_FLOW_TABLE_ENTRY";
687 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
688 return "DELETE_FLOW_TABLE_ENTRY";
690 case MLX5_CMD_OP_SET_DIAGNOSTICS:
691 return "MLX5_CMD_OP_SET_DIAGNOSTICS";
693 case MLX5_CMD_OP_QUERY_DIAGNOSTICS:
694 return "MLX5_CMD_OP_QUERY_DIAGNOSTICS";
696 default: return "unknown command opcode";
/*
 * Debug-dump one command: the inline portion of the command layout plus
 * every mailbox block of the input (@input != 0) or output message,
 * stopping at the end of the chain or after msg_len bytes of payload.
 * Emitted only when the relevant debug mask bits are enabled.
 */
700 static void dump_command(struct mlx5_core_dev *dev,
701 struct mlx5_cmd_work_ent *ent, int input)
703 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
704 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
708 int msg_len = input ? ent->uin_size : ent->uout_size;
711 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
714 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
715 "dump command data %s(0x%x) %s\n",
716 mlx5_command_str(op), op,
717 input ? "INPUT" : "OUTPUT");
719 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
720 mlx5_command_str(op), op,
721 input ? "INPUT" : "OUTPUT");
725 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
726 offset += sizeof(ent->lay->in);
728 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
729 offset += sizeof(ent->lay->out);
732 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
733 offset += sizeof(*ent->lay);
736 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
737 struct mlx5_cmd_prot_block *block;
739 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
742 if (offset >= msg_len)
744 dump_len = min_t(int,
745 MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);
747 dump_buf(block->data, dump_len, 1, offset);
748 offset += MLX5_CMD_DATA_BLOCK_SIZE;
750 mlx5_core_dbg(dev, "command block:\n");
751 dump_buf(block, sizeof(*block), 0, offset);
752 offset += sizeof(*block);
755 /* check for last block */
756 if (block->next == 0)
/*
 * Fabricate a firmware-internal-error outbox for a command that could
 * not reach (or complete on) the hardware because the device is down.
 * Teardown-style commands (first case group) are allowed to "succeed"
 * silently so cleanup paths make progress; all other known opcodes get
 * MLX5_CMD_STAT_INT_ERR with an all-ones syndrome.  Unknown opcodes are
 * logged as an error.
 */
764 static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
765 struct mlx5_outbox_hdr *hdr)
771 case MLX5_CMD_OP_TEARDOWN_HCA:
772 case MLX5_CMD_OP_DISABLE_HCA:
773 case MLX5_CMD_OP_MANAGE_PAGES:
774 case MLX5_CMD_OP_DESTROY_MKEY:
775 case MLX5_CMD_OP_DESTROY_EQ:
776 case MLX5_CMD_OP_DESTROY_CQ:
777 case MLX5_CMD_OP_DESTROY_QP:
778 case MLX5_CMD_OP_DESTROY_PSV:
779 case MLX5_CMD_OP_DESTROY_SRQ:
780 case MLX5_CMD_OP_DESTROY_XRC_SRQ:
781 case MLX5_CMD_OP_DESTROY_DCT:
782 case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
783 case MLX5_CMD_OP_DEALLOC_PD:
784 case MLX5_CMD_OP_DEALLOC_UAR:
785 case MLX5_CMD_OP_DETACH_FROM_MCG:
786 case MLX5_CMD_OP_DEALLOC_XRCD:
787 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
788 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
789 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
790 case MLX5_CMD_OP_DESTROY_LAG:
791 case MLX5_CMD_OP_DESTROY_VPORT_LAG:
792 case MLX5_CMD_OP_DESTROY_TIR:
793 case MLX5_CMD_OP_DESTROY_SQ:
794 case MLX5_CMD_OP_DESTROY_RQ:
795 case MLX5_CMD_OP_DESTROY_RMP:
796 case MLX5_CMD_OP_DESTROY_TIS:
797 case MLX5_CMD_OP_DESTROY_RQT:
798 case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
799 case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
800 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
801 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
802 case MLX5_CMD_OP_2ERR_QP:
803 case MLX5_CMD_OP_2RST_QP:
804 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
805 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
806 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
807 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
808 case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
809 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
810 case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
811 case MLX5_CMD_OP_MODIFY_VPORT_STATE:
812 case MLX5_CMD_OP_MODIFY_SQ:
813 case MLX5_CMD_OP_MODIFY_RQ:
814 case MLX5_CMD_OP_MODIFY_TIS:
815 case MLX5_CMD_OP_MODIFY_LAG:
816 case MLX5_CMD_OP_MODIFY_TIR:
817 case MLX5_CMD_OP_MODIFY_RMP:
818 case MLX5_CMD_OP_MODIFY_RQT:
819 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
820 case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
821 case MLX5_CMD_OP_MODIFY_CONG_STATUS:
822 case MLX5_CMD_OP_MODIFY_CQ:
823 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
824 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
825 case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
826 case MLX5_CMD_OP_ACCESS_REG:
827 case MLX5_CMD_OP_DRAIN_DCT:
/* All remaining known opcodes fail with an internal-error status. */
830 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
831 case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
832 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
833 case MLX5_CMD_OP_ALLOC_PD:
834 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
835 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
836 case MLX5_CMD_OP_ALLOC_UAR:
837 case MLX5_CMD_OP_ALLOC_XRCD:
838 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
839 case MLX5_CMD_OP_ARM_RQ:
840 case MLX5_CMD_OP_ARM_XRC_SRQ:
841 case MLX5_CMD_OP_ATTACH_TO_MCG:
842 case MLX5_CMD_OP_CONFIG_INT_MODERATION:
843 case MLX5_CMD_OP_CREATE_CQ:
844 case MLX5_CMD_OP_CREATE_DCT:
845 case MLX5_CMD_OP_CREATE_EQ:
846 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
847 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
848 case MLX5_CMD_OP_CREATE_LAG:
849 case MLX5_CMD_OP_CREATE_MKEY:
850 case MLX5_CMD_OP_CREATE_PSV:
851 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
852 case MLX5_CMD_OP_CREATE_QP:
853 case MLX5_CMD_OP_CREATE_RMP:
854 case MLX5_CMD_OP_CREATE_RQ:
855 case MLX5_CMD_OP_CREATE_RQT:
856 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
857 case MLX5_CMD_OP_CREATE_SQ:
858 case MLX5_CMD_OP_CREATE_SRQ:
859 case MLX5_CMD_OP_CREATE_TIR:
860 case MLX5_CMD_OP_CREATE_TIS:
861 case MLX5_CMD_OP_CREATE_VPORT_LAG:
862 case MLX5_CMD_OP_CREATE_XRC_SRQ:
863 case MLX5_CMD_OP_ENABLE_HCA:
864 case MLX5_CMD_OP_GEN_EQE:
865 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
866 case MLX5_CMD_OP_INIT2INIT_QP:
867 case MLX5_CMD_OP_INIT2RTR_QP:
868 case MLX5_CMD_OP_INIT_HCA:
869 case MLX5_CMD_OP_MAD_IFC:
870 case MLX5_CMD_OP_NOP:
871 case MLX5_CMD_OP_PAGE_FAULT_RESUME:
872 case MLX5_CMD_OP_QUERY_ADAPTER:
873 case MLX5_CMD_OP_QUERY_CONG_PARAMS:
874 case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
875 case MLX5_CMD_OP_QUERY_CONG_STATUS:
876 case MLX5_CMD_OP_QUERY_CQ:
877 case MLX5_CMD_OP_QUERY_DCT:
878 case MLX5_CMD_OP_QUERY_EQ:
879 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
880 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
881 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
882 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
883 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
884 case MLX5_CMD_OP_QUERY_HCA_CAP:
885 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
886 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
887 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
888 case MLX5_CMD_OP_QUERY_ISSI:
889 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
890 case MLX5_CMD_OP_QUERY_LAG:
891 case MLX5_CMD_OP_QUERY_MAD_DEMUX:
892 case MLX5_CMD_OP_QUERY_MKEY:
893 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
894 case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
895 case MLX5_CMD_OP_QUERY_PAGES:
896 case MLX5_CMD_OP_QUERY_QP:
897 case MLX5_CMD_OP_QUERY_Q_COUNTER:
898 case MLX5_CMD_OP_QUERY_RMP:
899 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
900 case MLX5_CMD_OP_QUERY_RQ:
901 case MLX5_CMD_OP_QUERY_RQT:
902 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
903 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
904 case MLX5_CMD_OP_QUERY_SQ:
905 case MLX5_CMD_OP_QUERY_SRQ:
906 case MLX5_CMD_OP_QUERY_TIR:
907 case MLX5_CMD_OP_QUERY_TIS:
908 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
909 case MLX5_CMD_OP_QUERY_VPORT_STATE:
910 case MLX5_CMD_OP_QUERY_XRC_SRQ:
911 case MLX5_CMD_OP_RST2INIT_QP:
912 case MLX5_CMD_OP_RTR2RTS_QP:
913 case MLX5_CMD_OP_RTS2RTS_QP:
914 case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
915 case MLX5_CMD_OP_SET_HCA_CAP:
916 case MLX5_CMD_OP_SET_ISSI:
917 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
918 case MLX5_CMD_OP_SET_MAD_DEMUX:
919 case MLX5_CMD_OP_SET_ROCE_ADDRESS:
920 case MLX5_CMD_OP_SQD_RTS_QP:
921 case MLX5_CMD_OP_SQERR2RTS_QP:
922 hdr->status = MLX5_CMD_STAT_INT_ERR;
923 hdr->syndrome = 0xFFFFFFFF;
926 mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
/*
 * Finish a command: if the device went down, synthesize an internal
 * error outbox; record timing statistics; then either invoke the async
 * callback (freeing the messages first) or signal the synchronous
 * waiter.  Also releases the semaphore taken by cmd_work_handler().
 */
931 static void complete_command(struct mlx5_cmd_work_ent *ent)
933 struct mlx5_cmd *cmd = ent->cmd;
934 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
936 mlx5_cmd_cbk_t callback;
940 struct mlx5_cmd_stats *stats;
943 struct semaphore *sem;
946 sem = &cmd->pages_sem;
950 if (dev->state != MLX5_DEVICE_STATE_UP) {
951 struct mlx5_outbox_hdr *out_hdr =
952 (struct mlx5_outbox_hdr *)ent->out;
953 struct mlx5_inbox_hdr *in_hdr =
954 (struct mlx5_inbox_hdr *)(ent->in->first.data);
955 u16 opcode = be16_to_cpu(in_hdr->opcode);
957 ent->ret = set_internal_err_outbox(dev,
/* per-opcode execution-time statistics */
963 ds = ent->ts2 - ent->ts1;
964 if (ent->op < ARRAY_SIZE(cmd->stats)) {
965 stats = &cmd->stats[ent->op];
966 spin_lock_irqsave(&stats->lock, flags);
969 spin_unlock_irqrestore(&stats->lock, flags);
972 callback = ent->callback;
973 context = ent->context;
976 err = mlx5_copy_from_msg(ent->uout,
980 mlx5_free_cmd_msg(dev, ent->out);
981 free_msg(dev, ent->in);
984 callback(err, context);
986 complete(&ent->done);
/*
 * Workqueue handler that actually submits one command to hardware:
 * reserve a slot, fill in the command layout (inline input, mailbox
 * DMA pointers, lengths, token, signatures), flush to RAM, then ring
 * the doorbell.  In polling mode it immediately drives completion via
 * mlx5_cmd_comp_handler(); in event mode the EQ interrupt completes it,
 * so @ent must not be touched after the doorbell write.
 */
991 static void cmd_work_handler(struct work_struct *work)
993 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
994 struct mlx5_cmd *cmd = ent->cmd;
995 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
996 struct mlx5_cmd_layout *lay;
997 struct semaphore *sem;
999 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
1000 if (cmd->moving_to_polling) {
1001 mlx5_core_warn(dev, "not expecting command execution, ignoring...\n");
1007 if (alloc_ent(ent) < 0) {
1008 complete_command(ent);
1012 ent->token = alloc_token(cmd);
1013 lay = get_inst(cmd, ent->idx);
1015 memset(lay, 0, sizeof(*lay));
1016 memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
/* opcode lives in the upper 16 bits of the first big-endian word */
1017 ent->op = be32_to_cpu(lay->in[0]) >> 16;
1018 if (ent->in->numpages != 0)
1019 lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
1020 if (ent->out->numpages != 0)
1021 lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
1022 lay->inlen = cpu_to_be32(ent->uin_size);
1023 lay->outlen = cpu_to_be32(ent->uout_size);
1024 lay->type = MLX5_PCI_CMD_XPORT;
1025 lay->token = ent->token;
1026 lay->status_own = CMD_OWNER_HW;
1027 set_signature(ent, !cmd->checksum_disabled);
1028 dump_command(dev, ent, 1);
1029 ent->ts1 = ktime_get_ns();
1031 /* ring doorbell after the descriptor is valid */
1032 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
1033 /* make sure data is written to RAM */
1034 mlx5_fwp_flush(cmd->cmd_page);
1035 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
1037 /* if not in polling don't use ent after this point*/
1038 if (cmd->mode == CMD_MODE_POLLING) {
1040 /* make sure we read the descriptor after ownership is SW */
1041 mlx5_cmd_comp_handler(dev, 1U << ent->idx);
1045 static const char *deliv_status_to_str(u8 status)
1048 case MLX5_CMD_DELIVERY_STAT_OK:
1050 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
1051 return "signature error";
1052 case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
1053 return "token error";
1054 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
1055 return "bad block number";
1056 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
1057 return "output pointer not aligned to block size";
1058 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
1059 return "input pointer not aligned to block size";
1060 case MLX5_CMD_DELIVERY_STAT_FW_ERR:
1061 return "firmware internal error";
1062 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
1063 return "command input length error";
1064 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
1065 return "command ouput length error";
1066 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
1067 return "reserved fields not cleared";
1068 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
1069 return "bad command descriptor type";
1071 return "unknown status code";
1075 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
1077 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
1079 return be16_to_cpu(hdr->opcode);
/*
 * Wait for a submitted command to complete.  In polling mode the
 * completion is driven synchronously, so wait unconditionally; in event
 * mode bound the wait by MLX5_CMD_TIMEOUT_MSEC.  A timeout leaks the
 * command slot (logged as a warning) because hardware may still own it.
 */
1082 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
1084 int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
1085 struct mlx5_cmd *cmd = &dev->cmd;
1088 if (cmd->mode == CMD_MODE_POLLING) {
1089 wait_for_completion(&ent->done);
1092 if (!wait_for_completion_timeout(&ent->done, timeout))
1098 if (err == -ETIMEDOUT) {
1099 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
1100 mlx5_command_str(msg_to_opcode(ent->in)),
1101 msg_to_opcode(ent->in));
1103 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
1104 err, deliv_status_to_str(ent->status), ent->status);
/* Notes on usage:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchrous completion
 */
/*
 * Build a work entry for one command, run it through the command
 * workqueue (or inline when the queue refuses it), wait for completion
 * in the synchronous case, and record per-opcode timing stats.  @status
 * receives the firmware status byte.
 */
1113 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
1115 struct mlx5_cmd_msg *out, void *uout, int uout_size,
1116 mlx5_cmd_cbk_t callback,
1117 void *context, int page_queue, u8 *status)
1119 struct mlx5_cmd *cmd = &dev->cmd;
1120 struct mlx5_cmd_work_ent *ent;
1121 struct mlx5_cmd_stats *stats;
/* async completion is not supported on the page queue */
1126 if (callback && page_queue)
1129 ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
1130 context, page_queue);
1132 return PTR_ERR(ent);
1135 init_completion(&ent->done);
1137 INIT_WORK(&ent->work, cmd_work_handler);
1139 cmd_work_handler(&ent->work);
1140 } else if (!queue_work(cmd->wq, &ent->work)) {
1141 mlx5_core_warn(dev, "failed to queue work\n");
1147 err = wait_func(dev, ent);
1148 if (err == -ETIMEDOUT)
1151 ds = ent->ts2 - ent->ts1;
1152 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
1153 if (op < ARRAY_SIZE(cmd->stats)) {
1154 stats = &cmd->stats[op];
1155 spin_lock_irq(&stats->lock);
1158 spin_unlock_irq(&stats->lock);
1160 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
1161 "fw exec time for %s is %lld nsec\n",
1162 mlx5_command_str(op), (long long)ds);
1163 *status = ent->status;
/*
 * Scatter @size bytes from a flat buffer into a command message: the
 * first sizeof(to->first.data) bytes go into the inline header, the
 * remainder into successive mailbox data blocks.
 */
1175 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
1180 if (to == NULL || from == NULL)
1183 delta = min_t(size_t, size, sizeof(to->first.data));
1184 memcpy(to->first.data, from, delta);
1185 from = (char *)from + delta;
1188 for (i = 0; size != 0; i++) {
1189 struct mlx5_cmd_prot_block *block;
1191 block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);
1193 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
1194 memcpy(block->data, from, delta);
1195 from = (char *)from + delta;
/*
 * Gather @size bytes out of a command message into a flat buffer —
 * the mirror of mlx5_copy_to_msg(): inline header first, then the
 * mailbox data blocks in order.
 */
1201 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
1206 if (to == NULL || from == NULL)
1209 delta = min_t(size_t, size, sizeof(from->first.data));
1210 memcpy(to, from->first.data, delta);
1211 to = (char *)to + delta;
1214 for (i = 0; size != 0; i++) {
1215 struct mlx5_cmd_prot_block *block;
1217 block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);
1219 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
1220 memcpy(to, block->data, delta);
1221 to = (char *)to + delta;
/*
 * Allocate a command message big enough for @size bytes of payload:
 * the inline header plus however many mailbox blocks the remainder
 * needs.  Blocks are zeroed, chained via DMA next-pointers, numbered,
 * and flushed to RAM.  Returns ERR_PTR(-ENOMEM) on failure.
 */
1227 static struct mlx5_cmd_msg *
1228 mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
1230 struct mlx5_cmd_msg *msg;
1235 blen = size - min_t(size_t, sizeof(msg->first.data), size);
1236 n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);
1238 msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
1240 return (ERR_PTR(-ENOMEM));
1242 for (i = 0; i != n; i++) {
1243 struct mlx5_cmd_prot_block *block;
1245 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
1247 memset(block, 0, MLX5_CMD_MBOX_SIZE);
/* chain all blocks except the last via their DMA addresses */
1250 u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
1251 block->next = cpu_to_be64(dma);
1253 block->block_num = cpu_to_be32(i);
1256 /* make sure initial data is written to RAM */
1257 mlx5_fwp_flush(msg);
/* Free a command message allocated by mlx5_alloc_cmd_msg(). */
static void
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	mlx5_fwp_free(msg);
}
1269 static void set_wqname(struct mlx5_core_dev *dev)
1271 struct mlx5_cmd *cmd = &dev->cmd;
1273 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1274 dev_name(&dev->pdev->dev));
/* Tear down the command debug files; body not visible in this chunk. */
1277 static void clean_debug_files(struct mlx5_core_dev *dev)
/*
 * Switch command completion to event (interrupt) mode.  Draining both
 * semaphores first quiesces all in-flight commands, the workqueue is
 * flushed, the mode flipped, and the semaphores released again.
 */
1282 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1284 struct mlx5_cmd *cmd = &dev->cmd;
1287 for (i = 0; i < cmd->max_reg_cmds; i++)
1290 down(&cmd->pages_sem);
1292 flush_workqueue(cmd->wq);
1294 cmd->mode = CMD_MODE_EVENTS;
1296 up(&cmd->pages_sem);
1297 for (i = 0; i < cmd->max_reg_cmds; i++)
/*
 * Switch command completion back to polling mode.  The pages EQ and
 * page workqueue are quiesced first; moving_to_polling makes the
 * command workqueue drop new submissions while the switch is in
 * progress (see cmd_work_handler()).
 */
1301 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1303 struct mlx5_cmd *cmd = &dev->cmd;
1305 synchronize_irq(dev->priv.eq_table.pages_eq.irqn);
1306 flush_workqueue(dev->priv.pg_wq);
1307 cmd->moving_to_polling = 1;
1308 flush_workqueue(cmd->wq);
1309 cmd->mode = CMD_MODE_POLLING;
1310 cmd->moving_to_polling = 0;
/*
 * Return a command message to its cache bucket when it came from one
 * (msg->cache set), otherwise free it outright.
 */
1313 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1315 unsigned long flags;
1318 spin_lock_irqsave(&msg->cache->lock, flags);
1319 list_add_tail(&msg->list, &msg->cache->head);
1320 spin_unlock_irqrestore(&msg->cache->lock, flags);
1322 mlx5_free_cmd_msg(dev, msg);
/*
 * Completion handler for one or more finished commands.  @vector is a
 * bitmask of completed command slots; for each set bit: timestamp the
 * entry, copy the inline output out of the descriptor, optionally
 * verify signatures, extract the status byte, then free the slot and
 * complete the command.  Called from the EQ interrupt path (event
 * mode) or directly from cmd_work_handler() (polling mode).
 */
1326 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
1328 struct mlx5_cmd *cmd = &dev->cmd;
1329 struct mlx5_cmd_work_ent *ent;
1332 /* make sure data gets read from RAM */
1333 mlx5_fwp_invalidate(cmd->cmd_page);
1335 while (vector != 0) {
1336 i = ffs(vector) - 1;
1337 vector &= ~(1U << i);
1338 ent = cmd->ent_arr[i];
1339 ent->ts2 = ktime_get_ns();
1340 memcpy(ent->out->first.data, ent->lay->out,
1341 sizeof(ent->lay->out));
1342 /* make sure data gets read from RAM */
1343 mlx5_fwp_invalidate(ent->out);
1344 dump_command(dev, ent, 0);
1346 if (!cmd->checksum_disabled)
1347 ent->ret = verify_signature(ent);
/* status byte sits above the ownership bit */
1350 ent->status = ent->lay->status_own >> 1;
1353 "FW command ret 0x%x, status %s(0x%x)\n",
1355 deliv_status_to_str(ent->status),
1358 free_ent(cmd, ent->idx);
1359 complete_command(ent);
1362 EXPORT_SYMBOL(mlx5_cmd_comp_handler);
/*
 * Force-complete every in-flight command (used when the device enters
 * an error state): compute the busy-slot mask from the inverted
 * allocation bitmap, then complete each corresponding entry after a
 * short grace period.
 */
1364 void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
1366 unsigned long vector;
1368 unsigned long flags;
1369 synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
1370 spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
/* busy slots are the cleared bits of cmd.bitmask */
1371 vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
1372 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1377 for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
1378 struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
1380 if (!test_bit(i, &vector))
1384 usleep_range(1000, 1100);
1385 free_ent(&dev->cmd, i);
1386 complete_command(ent);
1389 EXPORT_SYMBOL(mlx5_trigger_cmd_completions);
1391 static int status_to_err(u8 status)
1393 return status ? -1 : 0; /* TBD more meaningful codes */
/*
 * Get a command input message of at least @in_size bytes: reuse a
 * cached message from the medium or large bucket when one fits and is
 * available, otherwise fall back to a fresh allocation.
 */
1396 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1399 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1400 struct mlx5_cmd *cmd = &dev->cmd;
1401 struct cache_ent *ent = NULL;
1403 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
1404 ent = &cmd->cache.large;
1405 else if (in_size > 16 && in_size <= MED_LIST_SIZE)
1406 ent = &cmd->cache.med;
1409 spin_lock_irq(&ent->lock);
1410 if (!list_empty(&ent->head)) {
1411 msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
1413 list_del(&msg->list);
1415 spin_unlock_irq(&ent->lock);
1419 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
1424 static int is_manage_pages(struct mlx5_inbox_hdr *in)
1426 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
/*
 * cmd_exec_helper() - common execution path for both the synchronous
 * (mlx5_cmd_exec) and asynchronous (mlx5_cmd_exec_cb) entry points.
 * Copies 'in' into a (possibly cached) mailbox message, allocates an
 * output message, invokes the command, and copies the result back into
 * 'out'.  A non-NULL 'callback' selects asynchronous completion.
 * NOTE(review): local declarations (err, status, gfp, pages_queue) and
 * the error-unwind labels fall in lines not visible in this extract.
 */
1429 static int cmd_exec_helper(struct mlx5_core_dev *dev,
1430 void *in, int in_size,
1431 void *out, int out_size,
1432 mlx5_cmd_cbk_t callback, void *context)
1434 struct mlx5_cmd_msg *inb;
1435 struct mlx5_cmd_msg *outb;
/* MANAGE_PAGES commands go on the dedicated pages queue */
1441 pages_queue = is_manage_pages(in);
/* async callers may be in contexts that must not sleep */
1442 gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
1444 inb = alloc_msg(dev, in_size, gfp);
1450 err = mlx5_copy_to_msg(inb, in, in_size);
1452 mlx5_core_warn(dev, "err %d\n", err);
1456 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
1458 err = PTR_ERR(outb);
1462 err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
1463 context, pages_queue, &status);
/* NOTE(review): the timeout branch body is not visible here —
 * presumably bails out without copying the output; confirm. */
1465 if (err == -ETIMEDOUT)
1470 mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
/* translate the FW delivery status into the returned error code */
1472 err = status_to_err(status);
1479 err = mlx5_copy_from_msg(out, outb, out_size);
1482 mlx5_free_cmd_msg(dev, outb);
1489 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1492 return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
1494 EXPORT_SYMBOL(mlx5_cmd_exec);
1496 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
1497 void *out, int out_size, mlx5_cmd_cbk_t callback,
1500 return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
1502 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1504 static void destroy_msg_cache(struct mlx5_core_dev *dev)
1506 struct mlx5_cmd *cmd = &dev->cmd;
1507 struct mlx5_cmd_msg *msg;
1508 struct mlx5_cmd_msg *n;
1510 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
1511 list_del(&msg->list);
1512 mlx5_free_cmd_msg(dev, msg);
1515 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
1516 list_del(&msg->list);
1517 mlx5_free_cmd_msg(dev, msg);
/*
 * create_msg_cache() - pre-allocate the medium and large command message
 * free lists consumed by alloc_msg().
 * NOTE(review): the per-allocation error checks, the 'ex_err' unwind
 * label and the success return fall in lines not visible here; on
 * failure the partially built caches are torn down via
 * destroy_msg_cache().
 */
1521 static int create_msg_cache(struct mlx5_core_dev *dev)
1523 struct mlx5_cmd *cmd = &dev->cmd;
1524 struct mlx5_cmd_msg *msg;
1528 spin_lock_init(&cmd->cache.large.lock);
1529 INIT_LIST_HEAD(&cmd->cache.large.head);
1530 spin_lock_init(&cmd->cache.med.lock);
1531 INIT_LIST_HEAD(&cmd->cache.med.head);
/* populate the large-message free list */
1533 for (i = 0; i < NUM_LONG_LISTS; i++) {
1534 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
1539 msg->cache = &cmd->cache.large;
1540 list_add_tail(&msg->list, &cmd->cache.large.head);
/* populate the medium-message free list */
1543 for (i = 0; i < NUM_MED_LISTS; i++) {
1544 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
1549 msg->cache = &cmd->cache.med;
1550 list_add_tail(&msg->list, &cmd->cache.med.head);
/* error unwind */
1556 destroy_msg_cache(dev);
/*
 * alloc_cmd_page() - allocate the 4K firmware page backing the command
 * queue, plus the sx/mtx/cv primitives protecting command DMA.
 * NOTE(review): the return-type line, some bus_dma_tag_create arguments
 * (nsegments, flags, tag pointer) and the error-label lines are not
 * visible in this extract.
 */
1561 alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1565 sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
1566 mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
1567 cv_init(&cmd->dma_cv, "MLX5-DMA-CV");
1570 * Create global DMA descriptor tag for allocating
1571 * 4K firmware pages:
/* negated so bus_dma(9) errors follow the negative-errno convention */
1573 err = -bus_dma_tag_create(
1574 bus_get_dma_tag(dev->pdev->dev.bsddev),
1575 MLX5_ADAPTER_PAGE_SIZE, /* alignment */
1576 0, /* no boundary */
1577 BUS_SPACE_MAXADDR, /* lowaddr */
1578 BUS_SPACE_MAXADDR, /* highaddr */
1579 NULL, NULL, /* filter, filterarg */
1580 MLX5_ADAPTER_PAGE_SIZE, /* maxsize */
1582 MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */
1584 NULL, NULL, /* lockfunc, lockfuncarg */
1587 goto failure_destroy_sx;
1589 cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
1590 if (cmd->cmd_page == NULL) {
1592 goto failure_alloc_page;
/* cache the bus (DMA) and kernel virtual addresses of the queue page */
1594 cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
1595 cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
/* error unwind, reverse order of construction (labels not visible): */
1599 bus_dma_tag_destroy(cmd->dma_tag);
1602 cv_destroy(&cmd->dma_cv);
1603 mtx_destroy(&cmd->dma_mtx);
1604 sx_destroy(&cmd->dma_sx);
1609 free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1612 mlx5_fwp_free(cmd->cmd_page);
1613 bus_dma_tag_destroy(cmd->dma_tag);
1614 cv_destroy(&cmd->dma_cv);
1615 mtx_destroy(&cmd->dma_mtx);
1616 sx_destroy(&cmd->dma_sx);
/*
 * mlx5_cmd_init() - initialize the firmware command interface: verify
 * the command interface revision, allocate the command queue page, read
 * the queue geometry from the initialization segment, program the queue
 * base address into HW, and create the message cache and work queue.
 * Starts in polling mode; returns 0 on success or a negative errno.
 * NOTE(review): local declarations and the error-goto labels fall in
 * lines not visible in this extract.
 */
1619 int mlx5_cmd_init(struct mlx5_core_dev *dev)
1621 struct mlx5_cmd *cmd = &dev->cmd;
/* driver and firmware must agree on the command interface revision */
1627 cmd_if_rev = cmdif_rev_get(dev);
1628 if (cmd_if_rev != CMD_IF_REV) {
1629 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
1633 err = alloc_cmd_page(dev, cmd);
/* queue geometry: low byte packs log2(#entries) and log2(stride) */
1637 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
1638 cmd->log_sz = cmd_l >> 4 & 0xf;
1639 cmd->log_stride = cmd_l & 0xf;
1640 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
1641 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
/* the whole queue must fit in one adapter page */
1646 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
1647 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
1652 cmd->checksum_disabled = 1;
/* one slot is reserved (pages queue); the rest are regular commands */
1653 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1654 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
1656 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
1657 if (cmd->cmdif_rev > CMD_IF_REV) {
1658 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
1663 spin_lock_init(&cmd->alloc_lock);
1664 spin_lock_init(&cmd->token_lock);
1665 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
1666 spin_lock_init(&cmd->stats[i].lock);
/* 'sem' throttles regular commands; 'pages_sem' the pages queue */
1668 sema_init(&cmd->sem, cmd->max_reg_cmds);
1669 sema_init(&cmd->pages_sem, 1);
/* program the queue base address into the initialization segment */
1671 cmd_h = (u32)((u64)(cmd->dma) >> 32);
1672 cmd_l = (u32)(cmd->dma);
/* address must be 4KB aligned: low 12 bits carry the size field */
1673 if (cmd_l & 0xfff) {
1674 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
1679 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
1680 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
1682 /* Make sure firmware sees the complete address before we proceed */
1685 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
/* start in polling mode; events are enabled later */
1687 cmd->mode = CMD_MODE_POLLING;
1689 err = create_msg_cache(dev);
1691 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
1696 cmd->wq = create_singlethread_workqueue(cmd->wq_name);
1698 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");
/* error unwind (labels not visible in this extract): */
1706 destroy_msg_cache(dev);
1709 free_cmd_page(dev, cmd);
1714 EXPORT_SYMBOL(mlx5_cmd_init);
1716 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
1718 struct mlx5_cmd *cmd = &dev->cmd;
1720 clean_debug_files(dev);
1721 destroy_workqueue(cmd->wq);
1722 destroy_msg_cache(dev);
1723 free_cmd_page(dev, cmd);
1725 EXPORT_SYMBOL(mlx5_cmd_cleanup);
1727 static const char *cmd_status_str(u8 status)
1730 case MLX5_CMD_STAT_OK:
1732 case MLX5_CMD_STAT_INT_ERR:
1733 return "internal error";
1734 case MLX5_CMD_STAT_BAD_OP_ERR:
1735 return "bad operation";
1736 case MLX5_CMD_STAT_BAD_PARAM_ERR:
1737 return "bad parameter";
1738 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
1739 return "bad system state";
1740 case MLX5_CMD_STAT_BAD_RES_ERR:
1741 return "bad resource";
1742 case MLX5_CMD_STAT_RES_BUSY:
1743 return "resource busy";
1744 case MLX5_CMD_STAT_LIM_ERR:
1745 return "limits exceeded";
1746 case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
1747 return "bad resource state";
1748 case MLX5_CMD_STAT_IX_ERR:
1750 case MLX5_CMD_STAT_NO_RES_ERR:
1751 return "no resources";
1752 case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
1753 return "bad input length";
1754 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
1755 return "bad output length";
1756 case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
1757 return "bad QP state";
1758 case MLX5_CMD_STAT_BAD_PKT_ERR:
1759 return "bad packet (discarded)";
1760 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
1761 return "bad size too many outstanding CQEs";
1763 return "unknown status";
1767 static int cmd_status_to_err_helper(u8 status)
1770 case MLX5_CMD_STAT_OK: return 0;
1771 case MLX5_CMD_STAT_INT_ERR: return -EIO;
1772 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
1773 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
1774 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
1775 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
1776 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
1777 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
1778 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
1779 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
1780 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
1781 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
1782 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
1783 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
1784 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
1785 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
1786 default: return -EIO;
1790 /* this will be available till all the commands use set/get macros */
/*
 * mlx5_cmd_status_to_err() - log a warning for a failed command outbox
 * and convert its status byte into a negative errno.
 * NOTE(review): the lines between the signature and the printf are not
 * visible here — upstream has an early "return 0" when hdr->status is
 * zero; confirm against the full file.
 */
1791 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1796 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));
1798 return cmd_status_to_err_helper(hdr->status);
/*
 * mlx5_cmd_status_to_err_v2() - like mlx5_cmd_status_to_err() but for
 * the raw outbox layout: the status byte lives in the top byte of the
 * first big-endian dword, the syndrome in the second dword.
 * NOTE(review): lines between the status extraction and the syndrome
 * read are not visible here — presumably an early "return 0" on a zero
 * status; confirm against the full file.
 */
1801 int mlx5_cmd_status_to_err_v2(void *ptr)
1806 status = be32_to_cpu(*(__be32 *)ptr) >> 24;
1810 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
1812 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);
1814 return cmd_status_to_err_helper(status);