/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
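/*
 * Overview (added commentary): commands are posted to the firmware command
 * queue as fixed-size descriptors (struct mlx5_cmd_layout).  An initial chunk
 * of input and output travels inline in the descriptor; larger messages
 * continue in a chain of DMA mailbox blocks.  Completion is reported either
 * by polling the descriptor ownership bit or through a command EQE, and the
 * two cached message sizes below back a small allocation cache for inboxes.
 */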
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
			 MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,

	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   void *context, int page_queue)
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
		return ERR_PTR(-ENOMEM);

	ent->uout_size = uout_size;
	ent->context = context;
	ent->page_queue = page_queue;
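/*
 * Tokens are simple rolling identifiers stamped into the descriptor and each
 * mailbox block of a command; signatures and completions are checked against
 * them, which helps catch a mismatched or corrupted mailbox chain.
 */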
static u8 alloc_token(struct mlx5_cmd *cmd)
	spin_lock(&cmd->token_lock);
	spin_unlock(&cmd->token_lock);

static int alloc_ent(struct mlx5_cmd_work_ent *ent)
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev =
		container_of(cmd, struct mlx5_core_dev, cmd);
	int ret = cmd->max_reg_cmds;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!ent->page_queue) {
		ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
		if (ret >= cmd->max_reg_cmds)
	if (dev->state != MLX5_DEVICE_STATE_UP)
	clear_bit(ent->idx, &cmd->bitmask);
	cmd->ent_arr[ent->idx] = ent;
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

static void free_ent(struct mlx5_cmd *cmd, int idx)
	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
	return cmd->cmd_buf + (idx << cmd->log_stride);
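/*
 * Signature scheme: the descriptor and every mailbox block carry an XOR-8
 * checksum.  calc_block_sig() stores the complement of the XOR over the
 * signed region, so verifying a block reduces to checking that the XOR over
 * the region including its signature byte comes out as 0xff.
 */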
static u8 xor8_buf(void *buf, int len)
	for (i = 0; i < len; i++)

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)

	if (xor8_buf(block, sizeof(*block)) != 0xff)

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
	block->token = token;
	block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
				    sizeof(block->data) - 2);
	block->sig = ~xor8_buf(block, sizeof(*block) - 1);

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
	struct mlx5_cmd_mailbox *next = msg->next;
		calc_block_sig(next->buf, token, csum);

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
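/*
 * Polling path: a command descriptor belongs to firmware while CMD_OWNER_HW
 * is set in status_own.  poll_timeout() spins (with sleeps in between) until
 * ownership returns to software, the device leaves the UP state, or the
 * command timeout plus a one second grace period expires.
 */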
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
	struct mlx5_core_dev *dev = container_of(ent->cmd,
						 struct mlx5_core_dev, cmd);
	int poll_end = jiffies +
		       msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);

		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW) ||
		    dev->state != MLX5_DEVICE_STATE_UP) {
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;

static void free_cmd(struct mlx5_cmd_work_ent *ent)

static int verify_signature(struct mlx5_cmd_work_ent *ent)
	struct mlx5_cmd_mailbox *next = ent->out->next;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	err = verify_block_sig(next->buf);

static void dump_buf(void *buf, int size, int data_only, int offset)
	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
const char *mlx5_command_str(int command)
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";
	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";
	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";
	case MLX5_CMD_OP_ENABLE_HCA:
		return "MLX5_CMD_OP_ENABLE_HCA";
	case MLX5_CMD_OP_DISABLE_HCA:
		return "MLX5_CMD_OP_DISABLE_HCA";
	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";
	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
		return "PAGE_FAULT_RESUME";
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";
	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return "CREATE_XRC_SRQ";
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
		return "DESTROY_XRC_SRQ";
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		return "QUERY_XRC_SRQ";
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		return "ARM_XRC_SRQ";
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
		return "SET_DC_CNAK_TRACE";
	case MLX5_CMD_OP_DESTROY_DCT:
		return "DESTROY_DCT";
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		return "ARM_DCT_FOR_KEY_VIOLATION";
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
		return "QUERY_VPORT_STATE";
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
		return "MODIFY_VPORT_STATE";
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return "QUERY_ESW_VPORT_CONTEXT";
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
		return "MODIFY_ESW_VPORT_CONTEXT";
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
		return "QUERY_NIC_VPORT_CONTEXT";
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
		return "MODIFY_NIC_VPORT_CONTEXT";
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
		return "QUERY_ROCE_ADDRESS";
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
		return "SET_ROCE_ADDRESS";
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
		return "QUERY_HCA_VPORT_CONTEXT";
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
		return "MODIFY_HCA_VPORT_CONTEXT";
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
		return "QUERY_HCA_VPORT_GID";
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
		return "QUERY_HCA_VPORT_PKEY";
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
		return "QUERY_VPORT_COUNTER";
	case MLX5_CMD_OP_SET_WOL_ROL:
		return "SET_WOL_ROL";
	case MLX5_CMD_OP_QUERY_WOL_ROL:
		return "QUERY_WOL_ROL";
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		return "ALLOC_Q_COUNTER";
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
		return "DEALLOC_Q_COUNTER";
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		return "QUERY_Q_COUNTER";
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
		return "CONFIG_INT_MODERATION";
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";
	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
		return "GET_DROPPED_PACKET_LOG";
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
		return "QUERY_MAD_DEMUX";
	case MLX5_CMD_OP_SET_MAD_DEMUX:
		return "SET_MAD_DEMUX";
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		return "ALLOC_TRANSPORT_DOMAIN";
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
		return "DEALLOC_TRANSPORT_DOMAIN";
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
		return "QUERY_CONG_STATUS";
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
		return "MODIFY_CONG_STATUS";
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
		return "QUERY_CONG_PARAMS";
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
		return "MODIFY_CONG_PARAMS";
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return "QUERY_CONG_STATISTICS";
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		return "ADD_VXLAN_UDP_DPORT";
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
		return "DELETE_VXLAN_UDP_DPORT";
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		return "SET_L2_TABLE_ENTRY";
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		return "QUERY_L2_TABLE_ENTRY";
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
		return "DELETE_L2_TABLE_ENTRY";
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_DESTROY_RMP:
		return "DESTROY_RMP";
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_DESTROY_RQT:
		return "DESTROY_RQT";
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_ACCESS_REG:
		return "MLX5_CMD_OP_ACCESS_REG";
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_DESTROY_TIR:
		return "DESTROY_TIR";
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_DESTROY_TIS:
		return "DESTROY_TIS";
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		return "CREATE_FLOW_TABLE";
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		return "DESTROY_FLOW_TABLE";
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		return "QUERY_FLOW_TABLE";
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		return "CREATE_FLOW_GROUP";
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
		return "DESTROY_FLOW_GROUP";
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		return "QUERY_FLOW_GROUP";
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		return "SET_FLOW_TABLE_ENTRY";
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		return "QUERY_FLOW_TABLE_ENTRY";
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
		return "DELETE_FLOW_TABLE_ENTRY";
	case MLX5_CMD_OP_SET_DIAGNOSTICS:
		return "MLX5_CMD_OP_SET_DIAGNOSTICS";
	case MLX5_CMD_OP_QUERY_DIAGNOSTICS:
		return "MLX5_CMD_OP_QUERY_DIAGNOSTICS";
	default: return "unknown command opcode";
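/*
 * dump_command() traces a command in either direction: it prints the inline
 * part of the descriptor and then walks the mailbox chain, dumping one data
 * block at a time until the message length is exhausted.
 */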
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");

		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);

			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);

		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);

	while (next && offset < msg->len) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;

			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
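/*
 * When the device is no longer UP, commands are completed in software rather
 * than being sent to firmware.  The first group of opcodes below (teardown
 * and modify style commands) is completed without forcing an error so that
 * cleanup can proceed during recovery; the remaining opcodes get their outbox
 * stamped with an internal-error status and an all-ones syndrome.
 */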
static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
				   struct mlx5_outbox_hdr *hdr)
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_DRAIN_DCT:

	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
		hdr->status = MLX5_CMD_STAT_INT_ERR;
		hdr->syndrome = 0xFFFFFFFF;

		mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
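/*
 * complete_command() finishes a work entry: it records timing statistics,
 * copies the output message back to the caller's buffer, releases the
 * command messages, and then either invokes the asynchronous callback or
 * signals the waiter through ent->done.
 */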
static void complete_command(struct mlx5_cmd_work_ent *ent)
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
	mlx5_cmd_cbk_t callback;
	struct mlx5_cmd_stats *stats;
	struct semaphore *sem;

		sem = &cmd->pages_sem;

	if (dev->state != MLX5_DEVICE_STATE_UP) {
		struct mlx5_outbox_hdr *out_hdr =
			(struct mlx5_outbox_hdr *)ent->out;
		struct mlx5_inbox_hdr *in_hdr =
			(struct mlx5_inbox_hdr *)(ent->in->first.data);
		u16 opcode = be16_to_cpu(in_hdr->opcode);

		ent->ret = set_internal_err_outbox(dev,

	ds = ent->ts2 - ent->ts1;
	if (ent->op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[ent->op];
		spin_lock_irqsave(&stats->lock, flags);
		spin_unlock_irqrestore(&stats->lock, flags);

	callback = ent->callback;
	context = ent->context;
		err = mlx5_copy_from_msg(ent->uout,
	mlx5_free_cmd_msg(dev, ent->out);
	free_msg(dev, ent->in);
		callback(err, context);
		complete(&ent->done);
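/*
 * cmd_work_handler() runs on the command workqueue (it can also be invoked
 * directly from mlx5_cmd_invoke()): it grabs a free descriptor slot, builds
 * the layout from the input message, signs it, and rings the command
 * doorbell.  In polling mode it then drives the completion itself.
 */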
static void cmd_work_handler(struct work_struct *work)
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	if (cmd->moving_to_polling) {
		mlx5_core_warn(dev, "not expecting command execution, ignoring...\n");

	if (alloc_ent(ent) < 0) {
		complete_command(ent);

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);

	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx);
static const char *deliv_status_to_str(u8 status)
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	return "unknown status code";
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	if (!wait_for_completion_timeout(&ent->done, timeout))

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

/*
 * Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;

	if (callback && page_queue)

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
		return PTR_ERR(ent);

	init_completion(&ent->done);
	INIT_WORK(&ent->work, cmd_work_handler);
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)

	ds = ent->ts2 - ent->ts1;
	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		spin_unlock_irq(&stats->lock);
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), (long long)ds);
	*status = ent->status;
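/*
 * Command messages keep an initial chunk inline in msg->first.data; the
 * remainder is split across MLX5_CMD_DATA_BLOCK_SIZE-sized mailbox blocks
 * linked through their 'next' pointers.  The two copy helpers below walk
 * that chain in both directions.
 */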
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(block->data, from, copy);

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(to, block->data, copy);
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		return ERR_PTR(-ENOMEM);
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;

	msg = kzalloc(sizeof(*msg), flags);
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
			mlx5_core_warn(dev, "failed allocating block\n");
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);

		free_cmd_box(dev, head);
	return ERR_PTR(err);

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

		free_cmd_box(dev, head);
static void set_wqname(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));

static void clean_debug_files(struct mlx5_core_dev *dev)

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;

	for (i = 0; i < cmd->max_reg_cmds; i++)
	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;

	synchronize_irq(dev->priv.eq_table.pages_eq.irqn);
	flush_workqueue(dev->priv.pg_wq);
	cmd->moving_to_polling = 1;
	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;
	cmd->moving_to_polling = 0;

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
	unsigned long flags;

		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);

		mlx5_free_cmd_msg(dev, msg);
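/*
 * mlx5_cmd_comp_handler() is called from the command EQ (or from the polling
 * path) with a bit vector of completed descriptor slots.  For each set bit it
 * copies the inline output back, verifies signatures when checksums are
 * enabled, extracts the delivery status, and completes the entry.
 */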
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;

	while (vector != 0) {
		i = ffs(vector) - 1;
		vector &= ~(1U << i);
		ent = cmd->ent_arr[i];
		ent->ts2 = ktime_get_ns();
		memcpy(ent->out->first.data, ent->lay->out,
		       sizeof(ent->lay->out));
		dump_command(dev, ent, 0);

		if (!cmd->checksum_disabled)
			ent->ret = verify_signature(ent);

		ent->status = ent->lay->status_own >> 1;

			      "FW command ret 0x%x, status %s(0x%x)\n",
			      deliv_status_to_str(ent->status),

		free_ent(cmd, ent->idx);
		complete_command(ent);
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
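/*
 * mlx5_trigger_cmd_completions() is used when firmware stops answering (for
 * example after an internal error): it computes the set of busy descriptor
 * slots from the allocation bitmask and forces their completion in software.
 */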
void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
	unsigned long vector;
	unsigned long flags;

	synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
		struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];

		if (!test_bit(i, &vector))

		usleep_range(1000, 1100);
		free_ent(&dev->cmd, i);
		complete_command(ent);
EXPORT_SYMBOL(mlx5_trigger_cmd_completions);

static int status_to_err(u8 status)
	return status ? -1 : 0; /* TBD more meaningful codes */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
			/* For cached lists, we must explicitly state what is
			list_del(&msg->list);
		spin_unlock_irq(&ent->lock);

	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

static int is_manage_pages(struct mlx5_inbox_hdr *in)
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
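/*
 * MANAGE_PAGES is steered onto the dedicated page-queue entry (see
 * ent->page_queue and cmd->pages_sem above) so that page requests from
 * firmware can still make progress while the regular command slots are busy.
 */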
static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context)
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);

	err = mlx5_copy_to_msg(inb, in, in_size);
		mlx5_core_warn(dev, "err %d\n", err);

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
		err = PTR_ERR(outb);

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err == -ETIMEDOUT)

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
		err = status_to_err(status);

	err = mlx5_copy_from_msg(out, outb, out_size);

	mlx5_free_cmd_msg(dev, outb);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
	return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
	return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
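/*
 * Illustrative caller sketch (added commentary; the mailbox struct names are
 * examples and are not defined in this file): a typical user zeroes an inbox,
 * sets the opcode, issues the command synchronously, and converts the
 * returned firmware status, e.g.
 *
 *	struct mlx5_enable_hca_mbox_in in;
 *	struct mlx5_enable_hca_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */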
static void destroy_msg_cache(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);

static int create_msg_cache(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);

	destroy_msg_cache(dev);
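/*
 * The command queue page handed to firmware must be aligned to
 * MLX5_ADAPTER_PAGE_SIZE.  alloc_cmd_page() first tries a page-sized coherent
 * allocation and, if it does not come back naturally aligned, frees it and
 * falls back to allocating twice the page size minus one byte, aligning the
 * buffer pointer and DMA address by hand.
 */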
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, cmd->alloc_dma);
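/*
 * mlx5_cmd_init() brings the command interface up in polling mode: it checks
 * the command interface revision, allocates the DMA page and mailbox pool,
 * reads the queue size and stride from the initialization segment, programs
 * the queue address into cmdq_addr_h/l, and creates the message cache and
 * command workqueue.
 */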
int mlx5_cmd_init(struct mlx5_core_dev *dev)
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;

	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);

	err = alloc_cmd_page(dev, cmd);

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");

	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");

	destroy_msg_cache(dev);

	free_cmd_page(dev, cmd);

	pci_pool_destroy(cmd->pool);

EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
	case MLX5_CMD_STAT_OK:
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	return "unknown status";
static int cmd_status_to_err_helper(u8 status)
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
	printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err_helper(hdr->status);

int mlx5_cmd_status_to_err_v2(void *ptr)
	status = be32_to_cpu(*(__be32 *)ptr) >> 24;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);

	return cmd_status_to_err_helper(status);