/*
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cmd.h>

#include "mlx5_core.h"
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
	    MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
struct mlx5_ifc_mbox_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];

	u8 syndrome[0x20];

	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];

	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

	u8 reserved_at_40[0x40];
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   int uin_size,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->uin_size = uin_size;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd_work_ent *ent)
{
	unsigned long flags;
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev =
		container_of(cmd, struct mlx5_core_dev, cmd);
	int ret = cmd->max_reg_cmds;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!ent->page_queue) {
		ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
		if (ret >= cmd->max_reg_cmds)
			ret = -1;
	}

	if (dev->state != MLX5_DEVICE_STATE_UP)
		ret = -1;

	if (ret != -1) {
		ent->busy = 1;
		ent->idx = ret;
		clear_bit(ent->idx, &cmd->bitmask);
		cmd->ent_arr[ent->idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret;
}
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
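
/*
 * Descriptive note (added commentary, not from the original sources):
 * the helpers below implement the command-interface integrity check.
 * xor8_buf() XORs all bytes of a buffer; a mailbox block is considered
 * intact when the XOR over it, including its signature byte, comes out
 * as 0xff, which is why calc_block_sig() stores the bitwise complement
 * of the running XOR and verify_block_sig() compares against 0xff.
 */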
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}
static void
calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	int i;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		calc_block_sig(block, token, csum);

		/* check for last block */
		if (block->next == 0)
			break;
	}

	/* make sure data gets written to RAM */
	mlx5_fwp_flush(msg);
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd,
	    struct mlx5_core_dev, cmd);
	int poll_end = jiffies +
	    msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW) ||
		    dev->state != MLX5_DEVICE_STATE_UP) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	cancel_delayed_work_sync(&ent->cb_timeout_work);
	kfree(ent);
}
static int
verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_msg *msg = ent->out;
	int err;
	u8 sig;
	int i;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		err = verify_block_sig(block);
		if (err)
			return err;

		/* check for last block */
		if (block->next == 0)
			break;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS);
	MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS);
	default: return "unknown command opcode";
	}
}
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err_helper(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK: return 0;
	case MLX5_CMD_STAT_INT_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
	default: return -EIO;
	}
}

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8 status;
	u16 opcode;
	u16 op_mod;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);

	mlx5_core_err(dev,
		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
		      mlx5_command_str(opcode),
		      opcode, op_mod,
		      cmd_status_str(status),
		      status,
		      syndrome);

	return cmd_status_to_err_helper(status);
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	int msg_len = input ? ent->uin_size : ent->uout_size;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

		dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
		offset += sizeof(ent->lay->in);
		dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
		offset += sizeof(ent->lay->out);
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

			if (offset >= msg_len)
			dump_len = min_t(int,
			    MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);
			dump_buf(block->data, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;

			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(block, sizeof(*block), 0, offset);
			offset += sizeof(*block);

		/* check for last block */
		if (block->next == 0)
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
static void complete_command(struct mlx5_cmd_work_ent *ent)
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
	mlx5_cmd_cbk_t callback;
	struct mlx5_cmd_stats *stats;
	struct semaphore *sem;

		sem = &cmd->pages_sem;

	if (dev->state != MLX5_DEVICE_STATE_UP) {
		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

	ds = ent->ts2 - ent->ts1;
	if (ent->op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[ent->op];
		spin_lock_irqsave(&stats->lock, flags);
		spin_unlock_irqrestore(&stats->lock, flags);

		callback = ent->callback;
		context = ent->context;
		err = mlx5_copy_from_msg(ent->uout,
		err = err ? err : mlx5_cmd_check(dev,
		mlx5_free_cmd_msg(dev, ent->out);
		free_msg(dev, ent->in);
		err = err ? err : ent->status;
		callback(err, context);
		complete(&ent->done);
static void cmd_work_handler(struct work_struct *work)
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	bool poll_cmd = ent->polling;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;

	if (alloc_ent(ent) < 0) {
		complete_command(ent);

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);

	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);

	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx);
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;

	if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));

	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);
/*
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   bool force_polling)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;

	if (callback && page_queue)

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);

	ent->polling = force_polling;

	init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		spin_unlock_irq(&stats->lock);

	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), (long long)ds);
	*status = ent->status;
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
	if (to == NULL || from == NULL)

	delta = min_t(size_t, size, sizeof(to->first.data));
	memcpy(to->first.data, from, delta);
	from = (char *)from + delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(block->data, from, delta);
		from = (char *)from + delta;

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
	if (to == NULL || from == NULL)

	delta = min_t(size_t, size, sizeof(from->first.data));
	memcpy(to, from->first.data, delta);
	to = (char *)to + delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(to, block->data, delta);
		to = (char *)to + delta;
static struct mlx5_cmd_msg *
mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
	struct mlx5_cmd_msg *msg;

	blen = size - min_t(size_t, sizeof(msg->first.data), size);
	n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);

	msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
		return (ERR_PTR(-ENOMEM));

	for (i = 0; i != n; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		memset(block, 0, MLX5_CMD_MBOX_SIZE);

			u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
			block->next = cpu_to_be64(dma);
		block->block_num = cpu_to_be32(i);

	/* make sure initial data is written to RAM */
	mlx5_fwp_flush(msg);
static void
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);
	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;

	/* make sure data gets read from RAM */
	mlx5_fwp_invalidate(cmd->cmd_page);

	while (vector != 0) {
		i = ffs(vector) - 1;
		vector &= ~(1U << i);
		ent = cmd->ent_arr[i];

		cancel_delayed_work(&ent->cb_timeout_work);
		ent->ts2 = ktime_get_ns();
		memcpy(ent->out->first.data, ent->lay->out,
		       sizeof(ent->lay->out));
		/* make sure data gets read from RAM */
		mlx5_fwp_invalidate(ent->out);
		dump_command(dev, ent, 0);

		if (!cmd->checksum_disabled)
			ent->ret = verify_signature(ent);

			ent->status = ent->lay->status_own >> 1;
			if (vector & MLX5_TRIGGERED_CMD_COMP)
				ent->status = MLX5_DRIVER_STATUS_ABORTED;
			ent->status = ent->lay->status_own >> 1;

			      "FW command ret 0x%x, status %s(0x%x)\n",
			      deliv_status_to_str(ent->status),

		free_ent(cmd, ent->idx);
		complete_command(ent);
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
					 list);
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}
static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context,
			   bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	const gfp_t gfp = GFP_KERNEL;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size, gfp);

	err = mlx5_copy_to_msg(inb, in, in_size);
		mlx5_core_warn(dev, "err %d\n", err);

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
		err = PTR_ERR(outb);

	err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
			      context, pages_queue, &status, force_polling);

	if (err == -ETIMEDOUT)

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
		err = status_to_err(status);

	err = mlx5_copy_from_msg(out, outb, out_size);

	mlx5_free_cmd_msg(dev, outb);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, false);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
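
/*
 * Illustrative usage sketch (added commentary, not part of the driver):
 * a typical caller zeroes stack buffers sized with MLX5_ST_SZ_DW(), sets
 * the opcode with MLX5_SET() and then calls mlx5_cmd_exec().  The example
 * below issues a NOP command using only layouts defined in this file; the
 * function name is hypothetical and the block is compiled out.
 */
#if 0
static int mlx5_cmd_exec_nop_example(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(mbox_in)] = { };
	u32 out[MLX5_ST_SZ_DW(mbox_out)] = { };

	/* build the 16-byte inline command input */
	MLX5_SET(mbox_in, in, opcode, MLX5_CMD_OP_NOP);

	/*
	 * Returns 0 on success, or a negative errno derived from the
	 * firmware status via mlx5_cmd_check().
	 */
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
#endif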
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context, false);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, true);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}
static int create_msg_cache(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);

		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);

		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);

	destroy_msg_cache(dev);
alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
	sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
	mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
	cv_init(&cmd->dma_cv, "MLX5-DMA-CV");

	/*
	 * Create global DMA descriptor tag for allocating
	 * 4K firmware pages:
	 */
	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    MLX5_ADAPTER_PAGE_SIZE,	/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsize */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsegsize */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
		goto failure_destroy_sx;

	cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (cmd->cmd_page == NULL) {
		goto failure_alloc_page;
	cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
	cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);

	bus_dma_tag_destroy(cmd->dma_tag);

	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);

free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
	mlx5_fwp_free(cmd->cmd_page);
	bus_dma_tag_destroy(cmd->dma_tag);
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
int mlx5_cmd_init(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);

	err = alloc_cmd_page(dev, cmd);

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");

	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");

	destroy_msg_cache(dev);

	free_cmd_page(dev, cmd);

EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
				bool reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };

	MLX5_SET(query_cong_statistics_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
	MLX5_SET(query_cong_statistics_in, in, clear, reset);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL(mlx5_cmd_query_cong_counter);

int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL(mlx5_cmd_query_cong_params);

int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
				void *in, int in_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };

	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_modify_cong_params);