/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
/* Forward declaration; the definition appears later in this file. */
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen);
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 u16 vport, u32 *out, int outlen)
40 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
42 MLX5_SET(query_vport_state_in, in, opcode,
43 MLX5_CMD_OP_QUERY_VPORT_STATE);
44 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
45 MLX5_SET(query_vport_state_in, in, vport_number, vport);
47 MLX5_SET(query_vport_state_in, in, other_vport, 1);
49 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
51 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
56 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
58 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
60 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
62 return MLX5_GET(query_vport_state_out, out, state);
64 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
66 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
68 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
70 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
72 return MLX5_GET(query_vport_state_out, out, admin_state);
74 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
76 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
79 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
80 u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
83 MLX5_SET(modify_vport_state_in, in, opcode,
84 MLX5_CMD_OP_MODIFY_VPORT_STATE);
85 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
86 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
89 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
91 MLX5_SET(modify_vport_state_in, in, admin_state, state);
93 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
95 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
99 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
101 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
102 u32 *out, int outlen)
104 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
106 MLX5_SET(query_nic_vport_context_in, in, opcode,
107 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
109 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
111 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
113 return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
116 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
120 case MLX5_INTERFACE_PROTOCOL_IB:
121 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
122 MLX5_QCOUNTER_SETS_NETDEV);
123 case MLX5_INTERFACE_PROTOCOL_ETH:
124 return MLX5_QCOUNTER_SETS_NETDEV;
126 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
131 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
132 int client_id, u16 *counter_set_id)
134 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
135 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
138 if (mdev->num_q_counter_allocated[client_id] >
139 mlx5_vport_max_q_counter_allocator(mdev, client_id))
142 MLX5_SET(alloc_q_counter_in, in, opcode,
143 MLX5_CMD_OP_ALLOC_Q_COUNTER);
145 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
148 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
151 mdev->num_q_counter_allocated[client_id]++;
156 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
157 int client_id, u16 counter_set_id)
159 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
160 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
163 if (mdev->num_q_counter_allocated[client_id] <= 0)
166 MLX5_SET(dealloc_q_counter_in, in, opcode,
167 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
168 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
171 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
173 mdev->num_q_counter_allocated[client_id]--;
178 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
184 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
186 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
187 MLX5_SET(query_q_counter_in, in, clear, reset);
188 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
190 return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
193 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
195 u32 *out_of_rx_buffer)
197 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
200 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
206 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
211 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
215 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
219 out = mlx5_vzalloc(outlen);
223 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
224 nic_vport_context.permanent_address);
226 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
230 ether_addr_copy(addr, &out_addr[2]);
236 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
238 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
242 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
247 in = mlx5_vzalloc(inlen);
249 mlx5_core_warn(mdev, "failed to allocate inbox\n");
253 MLX5_SET(modify_nic_vport_context_in, in,
254 field_select.permanent_address, 1);
255 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
258 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
260 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
261 in, nic_vport_context);
262 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
265 ether_addr_copy(&perm_mac[2], addr);
267 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
273 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
275 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
276 u64 *system_image_guid)
279 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
282 out = mlx5_vzalloc(outlen);
286 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
290 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
291 nic_vport_context.system_image_guid);
296 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
298 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
301 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
304 out = mlx5_vzalloc(outlen);
308 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
312 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
313 nic_vport_context.node_guid);
319 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
321 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
325 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
328 out = mlx5_vzalloc(outlen);
332 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
336 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
337 nic_vport_context.port_guid);
344 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
348 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
351 out = mlx5_vzalloc(outlen);
355 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
359 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
360 nic_vport_context.qkey_violation_counter);
366 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
368 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
371 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
373 MLX5_SET(modify_nic_vport_context_in, in, opcode,
374 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
376 return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
379 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
383 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
386 in = mlx5_vzalloc(inlen);
388 mlx5_core_warn(mdev, "failed to allocate inbox\n");
392 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
393 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
396 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
403 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
404 bool other_vport, u8 *addr)
407 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
408 + MLX5_ST_SZ_BYTES(mac_address_layout);
413 in = mlx5_vzalloc(inlen);
415 mlx5_core_warn(mdev, "failed to allocate inbox\n");
419 MLX5_SET(modify_nic_vport_context_in, in,
420 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
421 MLX5_SET(modify_nic_vport_context_in, in,
422 vport_number, vport);
423 MLX5_SET(modify_nic_vport_context_in, in,
424 other_vport, other_vport);
425 MLX5_SET(modify_nic_vport_context_in, in,
426 field_select.addresses_list, 1);
427 MLX5_SET(modify_nic_vport_context_in, in,
428 nic_vport_context.allowed_list_type,
429 MLX5_NIC_VPORT_LIST_TYPE_UC);
430 MLX5_SET(modify_nic_vport_context_in, in,
431 nic_vport_context.allowed_list_size, 1);
433 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
434 nic_vport_context.current_uc_mac_address);
435 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
437 ether_addr_copy(mac_ptr, addr);
439 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
445 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
447 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
448 u32 vport, u64 node_guid)
451 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
453 void *nic_vport_context;
457 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
459 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
462 in = mlx5_vzalloc(inlen);
464 mlx5_core_warn(mdev, "failed to allocate inbox\n");
468 MLX5_SET(modify_nic_vport_context_in, in,
469 field_select.node_guid, 1);
470 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
472 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
474 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
475 in, nic_vport_context);
476 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
478 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
484 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
486 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
487 u32 vport, u64 port_guid)
490 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
492 void *nic_vport_context;
496 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
498 if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
501 in = mlx5_vzalloc(inlen);
503 mlx5_core_warn(mdev, "failed to allocate inbox\n");
507 MLX5_SET(modify_nic_vport_context_in, in,
508 field_select.port_guid, 1);
509 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
511 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
513 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
514 in, nic_vport_context);
515 MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
517 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
523 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
525 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
526 u16 *vlan_list, int list_len)
530 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
531 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
533 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
535 if (list_len > max_list_size) {
536 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
537 list_len, max_list_size);
541 in = mlx5_vzalloc(inlen);
543 mlx5_core_warn(dev, "failed to allocate inbox\n");
547 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
549 MLX5_SET(modify_nic_vport_context_in, in,
551 MLX5_SET(modify_nic_vport_context_in, in,
552 field_select.addresses_list, 1);
554 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
556 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
557 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
558 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
560 for (i = 0; i < list_len; i++) {
561 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
562 current_uc_mac_address[i]);
563 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
566 err = mlx5_modify_nic_vport_context(dev, in, inlen);
571 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
573 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
574 u64 *addr_list, size_t addr_list_len)
577 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
578 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
581 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
583 if ((int)addr_list_len > max_list_sz) {
584 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
585 (int)addr_list_len, max_list_sz);
589 in = mlx5_vzalloc(inlen);
591 mlx5_core_warn(mdev, "failed to allocate inbox\n");
595 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
597 MLX5_SET(modify_nic_vport_context_in, in,
599 MLX5_SET(modify_nic_vport_context_in, in,
600 field_select.addresses_list, 1);
602 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
604 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
605 MLX5_NIC_VPORT_LIST_TYPE_MC);
606 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
608 for (i = 0; i < addr_list_len; i++) {
609 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
610 current_uc_mac_address[i]);
611 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
613 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
616 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
622 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
624 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
625 bool promisc_mc, bool promisc_uc,
628 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
629 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
632 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
634 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
636 MLX5_SET(modify_nic_vport_context_in, in,
638 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
640 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
642 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
644 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
646 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
648 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
650 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
652 enum mlx5_list_type list_type,
653 u8 addr_list[][ETH_ALEN],
656 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
665 req_list_size = *list_size;
667 max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
668 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
669 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
671 if (req_list_size > max_list_size) {
672 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
673 req_list_size, max_list_size);
674 req_list_size = max_list_size;
677 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
678 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
680 out = kzalloc(out_sz, GFP_KERNEL);
684 MLX5_SET(query_nic_vport_context_in, in, opcode,
685 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
686 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
687 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
690 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
692 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
696 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
698 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
701 *list_size = req_list_size;
702 for (i = 0; i < req_list_size; i++) {
703 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
705 current_uc_mac_address[i]) + 2;
706 ether_addr_copy(addr_list[i], mac_addr);
712 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
714 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
715 enum mlx5_list_type list_type,
716 u8 addr_list[][ETH_ALEN],
719 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
727 max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
728 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
729 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
731 if (list_size > max_list_size)
734 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
735 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
737 in = kzalloc(in_sz, GFP_KERNEL);
741 MLX5_SET(modify_nic_vport_context_in, in, opcode,
742 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
743 MLX5_SET(modify_nic_vport_context_in, in,
744 field_select.addresses_list, 1);
746 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
749 MLX5_SET(nic_vport_context, nic_vport_ctx,
750 allowed_list_type, list_type);
751 MLX5_SET(nic_vport_context, nic_vport_ctx,
752 allowed_list_size, list_size);
754 for (i = 0; i < list_size; i++) {
755 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
757 current_uc_mac_address[i]) + 2;
758 ether_addr_copy(curr_mac, addr_list[i]);
761 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
765 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
767 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
772 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
781 req_list_size = *size;
782 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
783 if (req_list_size > max_list_size) {
784 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
785 req_list_size, max_list_size);
786 req_list_size = max_list_size;
789 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
790 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
792 out = kzalloc(out_sz, GFP_KERNEL);
796 MLX5_SET(query_nic_vport_context_in, in, opcode,
797 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
798 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
799 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
800 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
803 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
805 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
809 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
811 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
814 *size = req_list_size;
815 for (i = 0; i < req_list_size; i++) {
816 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
818 current_uc_mac_address[i]);
819 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
825 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
827 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
831 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
839 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
841 if (list_size > max_list_size)
844 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
845 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
847 in = kzalloc(in_sz, GFP_KERNEL);
851 MLX5_SET(modify_nic_vport_context_in, in, opcode,
852 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
853 MLX5_SET(modify_nic_vport_context_in, in,
854 field_select.addresses_list, 1);
856 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
859 MLX5_SET(nic_vport_context, nic_vport_ctx,
860 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
861 MLX5_SET(nic_vport_context, nic_vport_ctx,
862 allowed_list_size, list_size);
864 for (i = 0; i < list_size; i++) {
865 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
867 current_uc_mac_address[i]);
868 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
871 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
875 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
877 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
880 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
883 out = kzalloc(outlen, GFP_KERNEL);
887 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
891 *enable = MLX5_GET(query_nic_vport_context_out, out,
892 nic_vport_context.roce_en);
898 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
900 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
904 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
908 in = mlx5_vzalloc(inlen);
910 mlx5_core_warn(mdev, "failed to allocate inbox\n");
914 MLX5_SET(modify_nic_vport_context_in, in,
915 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
916 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
917 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
918 MLX5_SET(modify_nic_vport_context_in, in,
919 field_select.permanent_address, 1);
920 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
921 nic_vport_context.permanent_address.mac_addr_47_32);
922 ether_addr_copy(mac_ptr, addr);
924 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
930 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Enable RoCE on the NIC vport context. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
/* Disable RoCE on the NIC vport context. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
944 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
945 int vf, u8 port_num, void *out,
948 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
949 int is_group_manager;
953 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
954 in = mlx5_vzalloc(in_sz);
960 MLX5_SET(query_vport_counter_in, in, opcode,
961 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
963 if (is_group_manager) {
964 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
965 MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
971 if (MLX5_CAP_GEN(dev, num_ports) == 2)
972 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
974 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
979 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
981 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
982 u8 port_num, u8 vport_num, u32 *out,
985 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
986 int is_group_manager;
988 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
990 MLX5_SET(query_hca_vport_context_in, in, opcode,
991 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
994 if (is_group_manager) {
995 MLX5_SET(query_hca_vport_context_in, in, other_vport,
997 MLX5_SET(query_hca_vport_context_in, in, vport_number,
1004 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
1005 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
1007 return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
1010 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1011 u64 *system_image_guid)
1014 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1017 out = mlx5_vzalloc(outlen);
1021 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1025 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1026 hca_vport_context.system_image_guid);
1032 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1034 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1037 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1040 out = mlx5_vzalloc(outlen);
1044 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1048 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1049 hca_vport_context.node_guid);
1055 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1057 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1061 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1064 out = mlx5_vzalloc(outlen);
1068 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1072 *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1073 hca_vport_context.port_guid);
1080 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1081 u16 vport_num, u16 gid_index, union ib_gid *gid)
1083 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1084 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1085 int is_group_manager;
1093 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1094 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
1096 if (gid_index > tbsz && gid_index != 0xffff)
1099 if (gid_index == 0xffff)
1104 out_sz += nout * sizeof(*gid);
1106 in = mlx5_vzalloc(in_sz);
1107 out = mlx5_vzalloc(out_sz);
1113 MLX5_SET(query_hca_vport_gid_in, in, opcode,
1114 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1116 if (is_group_manager) {
1117 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1119 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1126 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1128 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1129 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1131 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1135 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1136 gid->global.subnet_prefix = tmp->global.subnet_prefix;
1137 gid->global.interface_id = tmp->global.interface_id;
1144 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1146 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1147 u8 port_num, u16 vf_num, u16 pkey_index,
1150 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1151 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1152 int is_group_manager;
1161 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1163 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1164 if (pkey_index > tbsz && pkey_index != 0xffff)
1167 if (pkey_index == 0xffff)
1172 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1174 in = kzalloc(in_sz, GFP_KERNEL);
1175 out = kzalloc(out_sz, GFP_KERNEL);
1177 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1178 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1180 if (is_group_manager) {
1181 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1183 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1189 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1191 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1192 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1194 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1198 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1199 for (i = 0; i < nout; i++, pkey++,
1200 pkarr += MLX5_ST_SZ_BYTES(pkey))
1201 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1208 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1210 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1214 u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1217 out = mlx5_vzalloc(outlen);
1221 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1225 *min_header = MLX5_GET(query_hca_vport_context_out, out,
1226 hca_vport_context.min_wqe_inline_mode);
1233 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1234 u16 vport, void *in, int inlen)
1236 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
1239 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1241 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1243 MLX5_SET(modify_esw_vport_context_in, in, opcode,
1244 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1246 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
1248 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
1253 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1254 u8 insert_mode, u8 strip_mode,
1255 u16 vlan, u8 cfi, u8 pcp)
1257 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1259 memset(in, 0, sizeof(in));
1261 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1262 MLX5_SET(modify_esw_vport_context_in, in,
1263 esw_vport_context.cvlan_cfi, cfi);
1264 MLX5_SET(modify_esw_vport_context_in, in,
1265 esw_vport_context.cvlan_pcp, pcp);
1266 MLX5_SET(modify_esw_vport_context_in, in,
1267 esw_vport_context.cvlan_id, vlan);
1270 MLX5_SET(modify_esw_vport_context_in, in,
1271 esw_vport_context.vport_cvlan_insert, insert_mode);
1273 MLX5_SET(modify_esw_vport_context_in, in,
1274 esw_vport_context.vport_cvlan_strip, strip_mode);
1276 MLX5_SET(modify_esw_vport_context_in, in, field_select,
1277 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1278 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1280 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1282 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1284 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1287 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1290 out = mlx5_vzalloc(outlen);
1294 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1298 *mtu = MLX5_GET(query_nic_vport_context_out, out,
1299 nic_vport_context.mtu);
1305 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1307 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1310 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1313 in = mlx5_vzalloc(inlen);
1317 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1318 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1320 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1325 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1327 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1331 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1334 out = mlx5_vzalloc(outlen);
1338 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1342 *min_header = MLX5_GET(query_nic_vport_context_out, out,
1343 nic_vport_context.min_wqe_inline_mode);
1350 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1351 u8 vport, int min_header)
1354 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1357 in = mlx5_vzalloc(inlen);
1361 MLX5_SET(modify_nic_vport_context_in, in,
1362 field_select.min_wqe_inline_mode, 1);
1363 MLX5_SET(modify_nic_vport_context_in, in,
1364 nic_vport_context.min_wqe_inline_mode, min_header);
1365 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1366 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1368 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1373 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1375 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1377 switch (MLX5_CAP_GEN(dev, port_type)) {
1378 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1379 return mlx5_query_hca_min_wqe_header(dev, min_header);
1381 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1382 return mlx5_query_vport_min_wqe_header(dev, min_header);
1388 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
/*
 * Query the three promiscuous-mode flags (unicast, multicast, all) of a
 * NIC vport context and return them through the caller's out-pointers.
 * NOTE(review): remaining parameters, the kzalloc NULL check, error
 * handling after the query, kfree(out) and the return statement are not
 * visible in this view.
 */
1390 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1397 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
/* Small fixed-size mailbox; plain kzalloc (not vzalloc) is used here. */
1400 out = kzalloc(outlen, GFP_KERNEL);
1404 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
/* Extract each promisc flag from the returned context. */
1408 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1409 nic_vport_context.promisc_uc);
1410 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1411 nic_vport_context.promisc_mc);
1412 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1413 nic_vport_context.promisc_all);
1419 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
/*
 * Program the promiscuous-mode flags (unicast, multicast, all) of this
 * function's NIC vport context.  Selects the promisc field group via
 * field_select.promisc so firmware applies only these three bits.
 * NOTE(review): parameter list tail, the error return after the failed
 * allocation, kvfree(in) and the final return are not visible in this
 * view.
 */
1421 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1427 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1430 in = mlx5_vzalloc(inlen);
/* Allocation-failure path logs before bailing out. */
1432 mlx5_core_err(mdev, "failed to allocate inbox\n");
/* Tell firmware the promisc field group is being modified ... */
1436 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
/* ... then write the three new flag values. */
1437 MLX5_SET(modify_nic_vport_context_in, in,
1438 nic_vport_context.promisc_uc, promisc_uc);
1439 MLX5_SET(modify_nic_vport_context_in, in,
1440 nic_vport_context.promisc_mc, promisc_mc);
1441 MLX5_SET(modify_nic_vport_context_in, in,
1442 nic_vport_context.promisc_all, promisc_all);
1444 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1448 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
/*
 * Enable/disable local loopback on our own vport (vport_number fixed to
 * 0).  @selection picks which loopback class to touch: multicast
 * (MLX5_LOCAL_MC_LB) or, in the else branch, unicast — each guarded by
 * its own field_select bit so only the chosen context field changes.
 * NOTE(review): the value argument written into
 * disable_{mc,uc}_local_lb, the else keyword, kvfree(in) and the return
 * are on lines not visible in this view.
 */
1450 int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
1451 enum mlx5_local_lb_selection selection,
1455 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1458 in = mlx5_vzalloc(inlen);
/* Allocation failure is only warned about here (vs. err elsewhere). */
1460 mlx5_core_warn(mdev, "failed to allocate inbox\n");
/* Always operate on our own vport. */
1464 MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);
1466 if (selection == MLX5_LOCAL_MC_LB) {
/* Multicast local-loopback control. */
1467 MLX5_SET(modify_nic_vport_context_in, in,
1468 field_select.disable_mc_local_lb, 1);
1469 MLX5_SET(modify_nic_vport_context_in, in,
1470 nic_vport_context.disable_mc_local_lb,
/* Unicast local-loopback control (the non-MC selection). */
1473 MLX5_SET(modify_nic_vport_context_in, in,
1474 field_select.disable_uc_local_lb, 1);
1475 MLX5_SET(modify_nic_vport_context_in, in,
1476 nic_vport_context.disable_uc_local_lb,
1480 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1485 EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);
/*
 * Query the current local-loopback disable bit of our own vport
 * (vport 0).  @selection chooses whether the multicast or the unicast
 * bit is reported through @value.
 * NOTE(review): the value out-parameter declaration, kzalloc NULL
 * check, error handling, the else keyword, kfree(out) and the return
 * are on lines not visible in this view.
 */
1487 int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
1488 enum mlx5_local_lb_selection selection,
1492 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1495 out = kzalloc(outlen, GFP_KERNEL);
/* vport 0 == this function's own vport. */
1499 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
/* Report either the MC or the UC disable bit, mirroring the modify
 * helper's selection logic. */
1503 if (selection == MLX5_LOCAL_MC_LB)
1504 *value = MLX5_GET(query_nic_vport_context_out, out,
1505 nic_vport_context.disable_mc_local_lb);
1507 *value = MLX5_GET(query_nic_vport_context_out, out,
1508 nic_vport_context.disable_uc_local_lb);
1514 EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
/*
 * Issue QUERY_VPORT_COUNTER for @vport_num on @port_num and place the
 * raw firmware output into the caller-provided @out buffer of
 * @out_size bytes.  Only a vport group manager may address another
 * vport (other_vport=1); port_num is meaningful only on dual-port
 * devices.
 * NOTE(review): the vzalloc NULL check, the else branch of the
 * group-manager test, kvfree(in) and the return are on lines not
 * visible in this view.
 */
1516 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1517 u8 port_num, u16 vport_num,
1518 void *out, int out_size)
1520 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1521 int is_group_manager;
/* Only group managers may query counters of other vports. */
1525 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1527 in = mlx5_vzalloc(in_sz);
1531 MLX5_SET(query_vport_counter_in, in, opcode,
1532 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1534 if (is_group_manager) {
1535 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1536 MLX5_SET(query_vport_counter_in, in, vport_number,
/* port_num only matters on dual-port HCAs. */
1543 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1544 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1546 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
1552 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
/*
 * Convenience wrapper around mlx5_query_vport_counter() for our own
 * vport (vport 0): run the query, then mechanically copy every 64-bit
 * packet/octet counter pair from the firmware output layout into the
 * caller's struct mlx5_vport_counters.
 * NOTE(review): the vzalloc NULL check, error handling after the query,
 * kvfree(out) and the return are on lines not visible in this view.
 */
1554 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1555 struct mlx5_vport_counters *vc)
1557 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1561 out = mlx5_vzalloc(out_sz);
/* vport_num 0 == this function's own vport. */
1565 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
/* Straight field-by-field copy-out; each counter is a 64-bit value in
 * the query_vport_counter_out layout. */
1569 vc->received_errors.packets =
1570 MLX5_GET64(query_vport_counter_out,
1571 out, received_errors.packets);
1572 vc->received_errors.octets =
1573 MLX5_GET64(query_vport_counter_out,
1574 out, received_errors.octets);
1575 vc->transmit_errors.packets =
1576 MLX5_GET64(query_vport_counter_out,
1577 out, transmit_errors.packets);
1578 vc->transmit_errors.octets =
1579 MLX5_GET64(query_vport_counter_out,
1580 out, transmit_errors.octets);
1581 vc->received_ib_unicast.packets =
1582 MLX5_GET64(query_vport_counter_out,
1583 out, received_ib_unicast.packets);
1584 vc->received_ib_unicast.octets =
1585 MLX5_GET64(query_vport_counter_out,
1586 out, received_ib_unicast.octets);
1587 vc->transmitted_ib_unicast.packets =
1588 MLX5_GET64(query_vport_counter_out,
1589 out, transmitted_ib_unicast.packets);
1590 vc->transmitted_ib_unicast.octets =
1591 MLX5_GET64(query_vport_counter_out,
1592 out, transmitted_ib_unicast.octets);
1593 vc->received_ib_multicast.packets =
1594 MLX5_GET64(query_vport_counter_out,
1595 out, received_ib_multicast.packets);
1596 vc->received_ib_multicast.octets =
1597 MLX5_GET64(query_vport_counter_out,
1598 out, received_ib_multicast.octets);
1599 vc->transmitted_ib_multicast.packets =
1600 MLX5_GET64(query_vport_counter_out,
1601 out, transmitted_ib_multicast.packets);
1602 vc->transmitted_ib_multicast.octets =
1603 MLX5_GET64(query_vport_counter_out,
1604 out, transmitted_ib_multicast.octets);
1605 vc->received_eth_broadcast.packets =
1606 MLX5_GET64(query_vport_counter_out,
1607 out, received_eth_broadcast.packets);
1608 vc->received_eth_broadcast.octets =
1609 MLX5_GET64(query_vport_counter_out,
1610 out, received_eth_broadcast.octets);
1611 vc->transmitted_eth_broadcast.packets =
1612 MLX5_GET64(query_vport_counter_out,
1613 out, transmitted_eth_broadcast.packets);
1614 vc->transmitted_eth_broadcast.octets =
1615 MLX5_GET64(query_vport_counter_out,
1616 out, transmitted_eth_broadcast.octets);
/* Note: the eth_unicast/eth_multicast groups copy octets before
 * packets, unlike the groups above — harmless, order is cosmetic. */
1617 vc->received_eth_unicast.octets =
1618 MLX5_GET64(query_vport_counter_out,
1619 out, received_eth_unicast.octets);
1620 vc->received_eth_unicast.packets =
1621 MLX5_GET64(query_vport_counter_out,
1622 out, received_eth_unicast.packets);
1623 vc->transmitted_eth_unicast.octets =
1624 MLX5_GET64(query_vport_counter_out,
1625 out, transmitted_eth_unicast.octets);
1626 vc->transmitted_eth_unicast.packets =
1627 MLX5_GET64(query_vport_counter_out,
1628 out, transmitted_eth_unicast.packets);
1629 vc->received_eth_multicast.octets =
1630 MLX5_GET64(query_vport_counter_out,
1631 out, received_eth_multicast.octets);
1632 vc->received_eth_multicast.packets =
1633 MLX5_GET64(query_vport_counter_out,
1634 out, received_eth_multicast.packets);
1635 vc->transmitted_eth_multicast.octets =
1636 MLX5_GET64(query_vport_counter_out,
1637 out, transmitted_eth_multicast.octets);
1638 vc->transmitted_eth_multicast.packets =
1639 MLX5_GET64(query_vport_counter_out,
1640 out, transmitted_eth_multicast.packets);
/*
 * Port-type dispatcher: fetch the system image GUID from the HCA vport
 * context (IB) or the NIC vport context (Ethernet).
 * NOTE(review): argument continuation lines and the switch's
 * default/error return are not visible in this view.
 */
1647 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1648 u64 *sys_image_guid)
1650 switch (MLX5_CAP_GEN(dev, port_type)) {
1651 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1652 return mlx5_query_hca_vport_system_image_guid(dev,
1655 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1656 return mlx5_query_nic_vport_system_image_guid(dev,
1663 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
/*
 * Port-type dispatcher: fetch the node GUID from the HCA vport context
 * (IB) or the NIC vport context (Ethernet).
 * NOTE(review): the switch's default/error return is not visible in
 * this view.
 */
1665 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1667 switch (MLX5_CAP_GEN(dev, port_type)) {
1668 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1669 return mlx5_query_hca_vport_node_guid(dev, node_guid);
1671 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1672 return mlx5_query_nic_vport_node_guid(dev, node_guid);
1678 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
/*
 * Port-type dispatcher: fetch the port GUID from the HCA vport context
 * (IB) or the NIC vport context (Ethernet).
 * NOTE(review): the switch's default/error return is not visible in
 * this view.
 */
1680 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1682 switch (MLX5_CAP_GEN(dev, port_type)) {
1683 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1684 return mlx5_query_hca_vport_port_guid(dev, port_guid);
1686 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1687 return mlx5_query_nic_vport_port_guid(dev, port_guid);
1693 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
/*
 * Query the HCA vport context of our own vport (vport 0 on port 1 —
 * see the hard-coded arguments below) and return its vport_state field.
 * NOTE(review): the vzalloc NULL check, error handling, kvfree(out)
 * and the return are on lines not visible in this view.
 */
1695 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1698 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1701 out = mlx5_vzalloc(outlen);
/* port 1, vport 0: this function's own vport on the first port. */
1705 err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1709 *vport_state = MLX5_GET(query_hca_vport_context_out, out,
1710 hca_vport_context.vport_state);
1716 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1718 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
1719 u8 port_num, void *out, size_t sz)
1724 in = mlx5_vzalloc(sz);
1730 MLX5_SET(ppcnt_reg, in, local_port, port_num);
1732 MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
1733 err = mlx5_core_access_reg(dev, in, sz, out,
1734 sz, MLX5_REG_PPCNT, 0, 0);