/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/vport.h>
#include "mlx5_core.h"
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 u16 vport, u32 *out, int outlen)
40 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
42 memset(in, 0, sizeof(in));
44 MLX5_SET(query_vport_state_in, in, opcode,
45 MLX5_CMD_OP_QUERY_VPORT_STATE);
46 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
47 MLX5_SET(query_vport_state_in, in, vport_number, vport);
49 MLX5_SET(query_vport_state_in, in, other_vport, 1);
51 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
53 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
58 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
60 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
62 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
64 return MLX5_GET(query_vport_state_out, out, state);
66 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
68 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
70 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
72 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
74 return MLX5_GET(query_vport_state_out, out, admin_state);
76 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
78 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
81 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
82 u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
85 memset(in, 0, sizeof(in));
87 MLX5_SET(modify_vport_state_in, in, opcode,
88 MLX5_CMD_OP_MODIFY_VPORT_STATE);
89 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
90 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
93 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
95 MLX5_SET(modify_vport_state_in, in, admin_state, state);
97 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
100 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
104 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
106 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
107 u32 *out, int outlen)
109 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
111 memset(in, 0, sizeof(in));
113 MLX5_SET(query_nic_vport_context_in, in, opcode,
114 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
116 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
118 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
120 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
123 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
127 case MLX5_INTERFACE_PROTOCOL_IB:
128 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
129 MLX5_QCOUNTER_SETS_NETDEV);
130 case MLX5_INTERFACE_PROTOCOL_ETH:
131 return MLX5_QCOUNTER_SETS_NETDEV;
133 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
138 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
139 int client_id, u16 *counter_set_id)
141 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
142 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
145 if (mdev->num_q_counter_allocated[client_id] >
146 mlx5_vport_max_q_counter_allocator(mdev, client_id))
149 memset(in, 0, sizeof(in));
150 memset(out, 0, sizeof(out));
152 MLX5_SET(alloc_q_counter_in, in, opcode,
153 MLX5_CMD_OP_ALLOC_Q_COUNTER);
155 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
159 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
162 mdev->num_q_counter_allocated[client_id]++;
167 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
168 int client_id, u16 counter_set_id)
170 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
171 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
174 if (mdev->num_q_counter_allocated[client_id] <= 0)
177 memset(in, 0, sizeof(in));
178 memset(out, 0, sizeof(out));
180 MLX5_SET(dealloc_q_counter_in, in, opcode,
181 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
182 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
185 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
188 mdev->num_q_counter_allocated[client_id]--;
193 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
199 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
201 memset(in, 0, sizeof(in));
203 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
204 MLX5_SET(query_q_counter_in, in, clear, reset);
205 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
207 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
211 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
213 u32 *out_of_rx_buffer)
215 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
218 memset(out, 0, sizeof(out));
220 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
226 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
231 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
232 u16 vport, u8 *min_inline)
234 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
237 err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
239 *min_inline = MLX5_GET(query_nic_vport_context_out, out,
240 nic_vport_context.min_wqe_inline_mode);
243 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
245 void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
248 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
249 case MLX5_CAP_INLINE_MODE_L2:
250 *min_inline_mode = MLX5_INLINE_MODE_L2;
252 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
253 mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
255 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
256 *min_inline_mode = MLX5_INLINE_MODE_NONE;
260 EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
262 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
263 u16 vport, u8 min_inline)
265 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
266 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
269 MLX5_SET(modify_nic_vport_context_in, in,
270 field_select.min_wqe_inline_mode, 1);
271 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
272 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
274 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
275 in, nic_vport_context);
276 MLX5_SET(nic_vport_context, nic_vport_ctx,
277 min_wqe_inline_mode, min_inline);
279 return mlx5_modify_nic_vport_context(mdev, in, inlen);
281 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);
283 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
287 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
291 out = mlx5_vzalloc(outlen);
295 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
296 nic_vport_context.permanent_address);
298 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
302 ether_addr_copy(addr, &out_addr[2]);
308 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
310 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
314 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
319 in = mlx5_vzalloc(inlen);
321 mlx5_core_warn(mdev, "failed to allocate inbox\n");
325 MLX5_SET(modify_nic_vport_context_in, in,
326 field_select.permanent_address, 1);
327 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
330 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
332 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
333 in, nic_vport_context);
334 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
337 ether_addr_copy(&perm_mac[2], addr);
339 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
345 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
347 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
348 u64 *system_image_guid)
351 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
354 out = mlx5_vzalloc(outlen);
358 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
362 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
363 nic_vport_context.system_image_guid);
368 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
370 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
373 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
376 out = mlx5_vzalloc(outlen);
380 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
384 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
385 nic_vport_context.node_guid);
391 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
393 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
397 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
400 out = mlx5_vzalloc(outlen);
404 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
408 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
409 nic_vport_context.port_guid);
416 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
420 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
423 out = mlx5_vzalloc(outlen);
427 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
431 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
432 nic_vport_context.qkey_violation_counter);
438 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
440 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
443 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
445 MLX5_SET(modify_nic_vport_context_in, in, opcode,
446 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
448 memset(out, 0, sizeof(out));
449 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
452 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
456 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
459 in = mlx5_vzalloc(inlen);
461 mlx5_core_warn(mdev, "failed to allocate inbox\n");
465 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
466 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
469 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
476 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
477 bool other_vport, u8 *addr)
480 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
481 + MLX5_ST_SZ_BYTES(mac_address_layout);
486 in = mlx5_vzalloc(inlen);
488 mlx5_core_warn(mdev, "failed to allocate inbox\n");
492 MLX5_SET(modify_nic_vport_context_in, in,
493 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
494 MLX5_SET(modify_nic_vport_context_in, in,
495 vport_number, vport);
496 MLX5_SET(modify_nic_vport_context_in, in,
497 other_vport, other_vport);
498 MLX5_SET(modify_nic_vport_context_in, in,
499 field_select.addresses_list, 1);
500 MLX5_SET(modify_nic_vport_context_in, in,
501 nic_vport_context.allowed_list_type,
502 MLX5_NIC_VPORT_LIST_TYPE_UC);
503 MLX5_SET(modify_nic_vport_context_in, in,
504 nic_vport_context.allowed_list_size, 1);
506 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
507 nic_vport_context.current_uc_mac_address);
508 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
510 ether_addr_copy(mac_ptr, addr);
512 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
518 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
520 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
521 u32 vport, u64 node_guid)
524 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
526 void *nic_vport_context;
530 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
532 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
535 in = mlx5_vzalloc(inlen);
537 mlx5_core_warn(mdev, "failed to allocate inbox\n");
541 MLX5_SET(modify_nic_vport_context_in, in,
542 field_select.node_guid, 1);
543 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
545 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
547 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
548 in, nic_vport_context);
549 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
551 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
557 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
559 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
560 u32 vport, u64 port_guid)
563 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
565 void *nic_vport_context;
569 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
571 if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
574 in = mlx5_vzalloc(inlen);
576 mlx5_core_warn(mdev, "failed to allocate inbox\n");
580 MLX5_SET(modify_nic_vport_context_in, in,
581 field_select.port_guid, 1);
582 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
584 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
586 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
587 in, nic_vport_context);
588 MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
590 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
596 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
598 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
599 u16 *vlan_list, int list_len)
603 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
604 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
606 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
608 if (list_len > max_list_size) {
609 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
610 list_len, max_list_size);
614 in = mlx5_vzalloc(inlen);
616 mlx5_core_warn(dev, "failed to allocate inbox\n");
620 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
622 MLX5_SET(modify_nic_vport_context_in, in,
624 MLX5_SET(modify_nic_vport_context_in, in,
625 field_select.addresses_list, 1);
627 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
629 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
630 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
631 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
633 for (i = 0; i < list_len; i++) {
634 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
635 current_uc_mac_address[i]);
636 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
639 err = mlx5_modify_nic_vport_context(dev, in, inlen);
644 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
646 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
647 u64 *addr_list, size_t addr_list_len)
650 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
651 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
654 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
656 if ((int)addr_list_len > max_list_sz) {
657 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
658 (int)addr_list_len, max_list_sz);
662 in = mlx5_vzalloc(inlen);
664 mlx5_core_warn(mdev, "failed to allocate inbox\n");
668 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
670 MLX5_SET(modify_nic_vport_context_in, in,
672 MLX5_SET(modify_nic_vport_context_in, in,
673 field_select.addresses_list, 1);
675 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
677 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
678 MLX5_NIC_VPORT_LIST_TYPE_MC);
679 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
681 for (i = 0; i < addr_list_len; i++) {
682 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
683 current_uc_mac_address[i]);
684 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
686 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
689 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
695 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
697 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
698 bool promisc_mc, bool promisc_uc,
701 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
702 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
705 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
707 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
709 MLX5_SET(modify_nic_vport_context_in, in,
711 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
713 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
715 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
717 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
719 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
721 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
723 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
725 enum mlx5_list_type list_type,
726 u8 addr_list[][ETH_ALEN],
729 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
738 req_list_size = *list_size;
740 max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
741 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
742 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
744 if (req_list_size > max_list_size) {
745 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
746 req_list_size, max_list_size);
747 req_list_size = max_list_size;
750 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
751 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
753 memset(in, 0, sizeof(in));
754 out = kzalloc(out_sz, GFP_KERNEL);
758 MLX5_SET(query_nic_vport_context_in, in, opcode,
759 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
760 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
761 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
764 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
766 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
770 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
772 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
775 *list_size = req_list_size;
776 for (i = 0; i < req_list_size; i++) {
777 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
779 current_uc_mac_address[i]) + 2;
780 ether_addr_copy(addr_list[i], mac_addr);
786 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
788 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
789 enum mlx5_list_type list_type,
790 u8 addr_list[][ETH_ALEN],
793 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
801 max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
802 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
803 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
805 if (list_size > max_list_size)
808 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
809 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
811 memset(out, 0, sizeof(out));
812 in = kzalloc(in_sz, GFP_KERNEL);
816 MLX5_SET(modify_nic_vport_context_in, in, opcode,
817 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
818 MLX5_SET(modify_nic_vport_context_in, in,
819 field_select.addresses_list, 1);
821 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
824 MLX5_SET(nic_vport_context, nic_vport_ctx,
825 allowed_list_type, list_type);
826 MLX5_SET(nic_vport_context, nic_vport_ctx,
827 allowed_list_size, list_size);
829 for (i = 0; i < list_size; i++) {
830 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
832 current_uc_mac_address[i]) + 2;
833 ether_addr_copy(curr_mac, addr_list[i]);
836 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
840 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
842 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
847 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
856 req_list_size = *size;
857 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
858 if (req_list_size > max_list_size) {
859 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
860 req_list_size, max_list_size);
861 req_list_size = max_list_size;
864 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
865 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
867 memset(in, 0, sizeof(in));
868 out = kzalloc(out_sz, GFP_KERNEL);
872 MLX5_SET(query_nic_vport_context_in, in, opcode,
873 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
874 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
875 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
876 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
879 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
881 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
885 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
887 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
890 *size = req_list_size;
891 for (i = 0; i < req_list_size; i++) {
892 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
894 current_uc_mac_address[i]);
895 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
901 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
903 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
907 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
915 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
917 if (list_size > max_list_size)
920 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
921 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
923 memset(out, 0, sizeof(out));
924 in = kzalloc(in_sz, GFP_KERNEL);
928 MLX5_SET(modify_nic_vport_context_in, in, opcode,
929 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
930 MLX5_SET(modify_nic_vport_context_in, in,
931 field_select.addresses_list, 1);
933 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
936 MLX5_SET(nic_vport_context, nic_vport_ctx,
937 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
938 MLX5_SET(nic_vport_context, nic_vport_ctx,
939 allowed_list_size, list_size);
941 for (i = 0; i < list_size; i++) {
942 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
944 current_uc_mac_address[i]);
945 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
948 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
952 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
954 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
957 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
960 out = kzalloc(outlen, GFP_KERNEL);
964 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
968 *enable = MLX5_GET(query_nic_vport_context_out, out,
969 nic_vport_context.roce_en);
975 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
977 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
981 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
985 in = mlx5_vzalloc(inlen);
987 mlx5_core_warn(mdev, "failed to allocate inbox\n");
991 MLX5_SET(modify_nic_vport_context_in, in,
992 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
993 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
994 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
995 MLX5_SET(modify_nic_vport_context_in, in,
996 field_select.permanent_address, 1);
997 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
998 nic_vport_context.permanent_address.mac_addr_47_32);
999 ether_addr_copy(mac_ptr, addr);
1001 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1007 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Enable RoCE on our own NIC vport. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
/* Disable RoCE on our own NIC vport. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
1021 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
1022 u8 port_num, u8 vport_num, u32 *out,
1025 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
1026 int is_group_manager;
1028 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
1030 memset(in, 0, sizeof(in));
1032 MLX5_SET(query_hca_vport_context_in, in, opcode,
1033 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
1036 if (is_group_manager) {
1037 MLX5_SET(query_hca_vport_context_in, in, other_vport,
1039 MLX5_SET(query_hca_vport_context_in, in, vport_number,
1046 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
1047 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
1049 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
1052 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1053 u64 *system_image_guid)
1056 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1059 out = mlx5_vzalloc(outlen);
1063 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1067 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1068 hca_vport_context.system_image_guid);
1074 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1076 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1079 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1082 out = mlx5_vzalloc(outlen);
1086 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1090 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1091 hca_vport_context.node_guid);
1097 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1099 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1103 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1106 out = mlx5_vzalloc(outlen);
1110 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1114 *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1115 hca_vport_context.port_guid);
1122 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1123 u16 vport_num, u16 gid_index, union ib_gid *gid)
1125 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1126 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1127 int is_group_manager;
1135 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1136 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
1138 if (gid_index > tbsz && gid_index != 0xffff)
1141 if (gid_index == 0xffff)
1146 out_sz += nout * sizeof(*gid);
1148 in = mlx5_vzalloc(in_sz);
1149 out = mlx5_vzalloc(out_sz);
1155 MLX5_SET(query_hca_vport_gid_in, in, opcode,
1156 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1158 if (is_group_manager) {
1159 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1161 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1168 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1170 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1171 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1173 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1177 err = mlx5_cmd_status_to_err_v2(out);
1181 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1182 gid->global.subnet_prefix = tmp->global.subnet_prefix;
1183 gid->global.interface_id = tmp->global.interface_id;
1190 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1192 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1193 u8 port_num, u16 vf_num, u16 pkey_index,
1196 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1197 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1198 int is_group_manager;
1207 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1209 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1210 if (pkey_index > tbsz && pkey_index != 0xffff)
1213 if (pkey_index == 0xffff)
1218 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1220 in = kzalloc(in_sz, GFP_KERNEL);
1221 out = kzalloc(out_sz, GFP_KERNEL);
1223 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1224 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1226 if (is_group_manager) {
1227 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1229 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1235 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1237 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1238 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1240 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1244 err = mlx5_cmd_status_to_err_v2(out);
1248 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1249 for (i = 0; i < nout; i++, pkey++,
1250 pkarr += MLX5_ST_SZ_BYTES(pkey))
1251 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1258 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1260 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1264 u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1267 out = mlx5_vzalloc(outlen);
1271 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1275 *min_header = MLX5_GET(query_hca_vport_context_out, out,
1276 hca_vport_context.min_wqe_inline_mode);
1283 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1284 u16 vport, void *in, int inlen)
1286 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
1289 memset(out, 0, sizeof(out));
1291 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1293 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1295 MLX5_SET(modify_esw_vport_context_in, in, opcode,
1296 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1298 err = mlx5_cmd_exec_check_status(mdev, in, inlen,
1301 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
1306 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1307 u8 insert_mode, u8 strip_mode,
1308 u16 vlan, u8 cfi, u8 pcp)
1310 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1312 memset(in, 0, sizeof(in));
1314 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1315 MLX5_SET(modify_esw_vport_context_in, in,
1316 esw_vport_context.cvlan_cfi, cfi);
1317 MLX5_SET(modify_esw_vport_context_in, in,
1318 esw_vport_context.cvlan_pcp, pcp);
1319 MLX5_SET(modify_esw_vport_context_in, in,
1320 esw_vport_context.cvlan_id, vlan);
1323 MLX5_SET(modify_esw_vport_context_in, in,
1324 esw_vport_context.vport_cvlan_insert, insert_mode);
1326 MLX5_SET(modify_esw_vport_context_in, in,
1327 esw_vport_context.vport_cvlan_strip, strip_mode);
1329 MLX5_SET(modify_esw_vport_context_in, in, field_select,
1330 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1331 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1333 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1335 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1337 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1340 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1343 out = mlx5_vzalloc(outlen);
1347 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1351 *mtu = MLX5_GET(query_nic_vport_context_out, out,
1352 nic_vport_context.mtu);
1358 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1360 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1363 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1366 in = mlx5_vzalloc(inlen);
1370 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1371 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1373 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1378 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
/*
 * mlx5_query_vport_min_wqe_header() - internal helper: read the minimum
 * WQE inline mode of the function's own NIC vport.
 * @mdev:       mlx5 core device.
 * @min_header: out parameter, filled from
 *              nic_vport_context.min_wqe_inline_mode.
 *
 * Ethernet-port leg of mlx5_query_min_wqe_header() below.
 */
1380 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1384 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1387 out = mlx5_vzalloc(outlen);
1391 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); /* own vport */
1395 *min_header = MLX5_GET(query_nic_vport_context_out, out,
1396 nic_vport_context.min_wqe_inline_mode);
/*
 * mlx5_set_vport_min_wqe_header() - set the minimum WQE inline mode of
 * ANOTHER function's vport.
 * @mdev:       mlx5 core device.
 * @vport:      target vport number.
 * @min_header: inline mode written to nic_vport_context.min_wqe_inline_mode.
 *
 * other_vport is set unconditionally, so this always targets @vport rather
 * than the caller's own vport.  NOTE(review): presumably the caller must be
 * the vport group manager (e-switch manager) for the firmware to accept
 * other_vport=1 — confirm against the PRM / callers.
 */
1403 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1404 u8 vport, int min_header)
1407 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1410 in = mlx5_vzalloc(inlen);
1414 MLX5_SET(modify_nic_vport_context_in, in,
1415 field_select.min_wqe_inline_mode, 1); /* only this field is being modified */
1416 MLX5_SET(modify_nic_vport_context_in, in,
1417 nic_vport_context.min_wqe_inline_mode, min_header);
1418 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1419 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); /* act on @vport, not our own */
1421 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1426 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
/*
 * mlx5_query_min_wqe_header() - port-type dispatcher for the minimum WQE
 * inline-mode query.
 * @dev:        mlx5 core device.
 * @min_header: out parameter set by the selected backend.
 *
 * IB ports use the HCA-level query; Ethernet ports read the NIC vport
 * context (helper above).  NOTE(review): the default case is elided in
 * this excerpt — presumably returns an error for unknown port types.
 */
1428 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1430 switch (MLX5_CAP_GEN(dev, port_type)) {
1431 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1432 return mlx5_query_hca_min_wqe_header(dev, min_header);
1434 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1435 return mlx5_query_vport_min_wqe_header(dev, min_header);
1441 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
/*
 * mlx5_query_nic_vport_promisc() - read the three promiscuous-mode flags
 * of a NIC vport.
 * @mdev:        mlx5 core device.
 * @vport:       vport number to query.
 * @promisc_uc:  out, unicast promiscuous flag.
 * @promisc_mc:  out, multicast promiscuous flag.
 * @promisc_all: out, all-traffic promiscuous flag.
 *
 * Note the output mailbox here uses kzalloc (GFP_KERNEL) rather than the
 * mlx5_vzalloc used elsewhere in this file; the context layout fits a
 * regular kmalloc allocation.
 */
1443 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1450 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1453 out = kzalloc(outlen, GFP_KERNEL);
1457 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1461 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1462 nic_vport_context.promisc_uc);
1463 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1464 nic_vport_context.promisc_mc);
1465 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1466 nic_vport_context.promisc_all);
1472 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
/*
 * mlx5_modify_nic_vport_promisc() - program the three promiscuous-mode
 * flags of the function's own NIC vport.
 * @mdev:        mlx5 core device.
 * @promisc_uc:  unicast promiscuous flag to set.
 * @promisc_mc:  multicast promiscuous flag to set.
 * @promisc_all: all-traffic promiscuous flag to set.
 *
 * field_select.promisc tells the firmware that the promisc fields are the
 * ones being modified.  No vport_number/other_vport is set, so this acts
 * on the caller's own vport.
 */
1474 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1480 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1483 in = mlx5_vzalloc(inlen);
1485 mlx5_core_err(mdev, "failed to allocate inbox\n"); /* alloc-failure path */
1489 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1490 MLX5_SET(modify_nic_vport_context_in, in,
1491 nic_vport_context.promisc_uc, promisc_uc);
1492 MLX5_SET(modify_nic_vport_context_in, in,
1493 nic_vport_context.promisc_mc, promisc_mc);
1494 MLX5_SET(modify_nic_vport_context_in, in,
1495 nic_vport_context.promisc_all, promisc_all);
1497 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1501 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
/*
 * mlx5_query_vport_counter() - issue QUERY_VPORT_COUNTER and hand the raw
 * firmware output back to the caller.
 * @dev:       mlx5 core device.
 * @port_num:  physical port; only written into the command on dual-port
 *             devices (num_ports == 2).
 * @vport_num: vport whose counters are requested; only honored when the
 *             device is a vport group manager (other_vport is then set).
 * @out:       caller-provided output buffer (query_vport_counter_out layout).
 * @out_size:  size of @out in bytes.
 *
 * Uses mlx5_cmd_exec() directly and then converts the command status with
 * mlx5_cmd_status_to_err_v2(), unlike most helpers in this file which use
 * mlx5_cmd_exec_check_status().
 */
1503 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1504 u8 port_num, u16 vport_num,
1505 void *out, int out_size)
1507 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1508 int is_group_manager;
1512 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1514 in = mlx5_vzalloc(in_sz);
1518 MLX5_SET(query_vport_counter_in, in, opcode,
1519 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1521 if (is_group_manager) {
1522 MLX5_SET(query_vport_counter_in, in, other_vport, 1); /* may query other vports */
1523 MLX5_SET(query_vport_counter_in, in, vport_number,
1530 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1531 MLX5_SET(query_vport_counter_in, in, port_num, port_num); /* dual-port only */
1533 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
1536 err = mlx5_cmd_status_to_err_v2(out); /* translate FW status into errno */
1544 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
/*
 * mlx5_get_vport_counters() - fetch the full counter set of the function's
 * own vport (vport_num 0) on @port_num and unpack it into @vc.
 * @dev:      mlx5 core device.
 * @port_num: physical port to query (forwarded to mlx5_query_vport_counter).
 * @vc:       out: decoded 64-bit packet/octet counters for IB unicast,
 *            IB multicast, Ethernet unicast/multicast/broadcast, plus
 *            receive and transmit error counters.
 *
 * Pure unmarshalling after the query: each MLX5_GET64 below copies one
 * 64-bit field out of the firmware response into the corresponding
 * struct mlx5_vport_counters member.
 */
1546 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1547 struct mlx5_vport_counters *vc)
1549 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1553 out = mlx5_vzalloc(out_sz);
1557 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz); /* vport 0 == own vport */
1561 vc->received_errors.packets =
1562 MLX5_GET64(query_vport_counter_out,
1563 out, received_errors.packets);
1564 vc->received_errors.octets =
1565 MLX5_GET64(query_vport_counter_out,
1566 out, received_errors.octets);
1567 vc->transmit_errors.packets =
1568 MLX5_GET64(query_vport_counter_out,
1569 out, transmit_errors.packets);
1570 vc->transmit_errors.octets =
1571 MLX5_GET64(query_vport_counter_out,
1572 out, transmit_errors.octets);
1573 vc->received_ib_unicast.packets =
1574 MLX5_GET64(query_vport_counter_out,
1575 out, received_ib_unicast.packets);
1576 vc->received_ib_unicast.octets =
1577 MLX5_GET64(query_vport_counter_out,
1578 out, received_ib_unicast.octets);
1579 vc->transmitted_ib_unicast.packets =
1580 MLX5_GET64(query_vport_counter_out,
1581 out, transmitted_ib_unicast.packets);
1582 vc->transmitted_ib_unicast.octets =
1583 MLX5_GET64(query_vport_counter_out,
1584 out, transmitted_ib_unicast.octets);
1585 vc->received_ib_multicast.packets =
1586 MLX5_GET64(query_vport_counter_out,
1587 out, received_ib_multicast.packets);
1588 vc->received_ib_multicast.octets =
1589 MLX5_GET64(query_vport_counter_out,
1590 out, received_ib_multicast.octets);
1591 vc->transmitted_ib_multicast.packets =
1592 MLX5_GET64(query_vport_counter_out,
1593 out, transmitted_ib_multicast.packets);
1594 vc->transmitted_ib_multicast.octets =
1595 MLX5_GET64(query_vport_counter_out,
1596 out, transmitted_ib_multicast.octets);
1597 vc->received_eth_broadcast.packets =
1598 MLX5_GET64(query_vport_counter_out,
1599 out, received_eth_broadcast.packets);
1600 vc->received_eth_broadcast.octets =
1601 MLX5_GET64(query_vport_counter_out,
1602 out, received_eth_broadcast.octets);
1603 vc->transmitted_eth_broadcast.packets =
1604 MLX5_GET64(query_vport_counter_out,
1605 out, transmitted_eth_broadcast.packets);
1606 vc->transmitted_eth_broadcast.octets =
1607 MLX5_GET64(query_vport_counter_out,
1608 out, transmitted_eth_broadcast.octets);
1609 vc->received_eth_unicast.octets =
1610 MLX5_GET64(query_vport_counter_out,
1611 out, received_eth_unicast.octets);
1612 vc->received_eth_unicast.packets =
1613 MLX5_GET64(query_vport_counter_out,
1614 out, received_eth_unicast.packets);
1615 vc->transmitted_eth_unicast.octets =
1616 MLX5_GET64(query_vport_counter_out,
1617 out, transmitted_eth_unicast.octets);
1618 vc->transmitted_eth_unicast.packets =
1619 MLX5_GET64(query_vport_counter_out,
1620 out, transmitted_eth_unicast.packets);
1621 vc->received_eth_multicast.octets =
1622 MLX5_GET64(query_vport_counter_out,
1623 out, received_eth_multicast.octets);
1624 vc->received_eth_multicast.packets =
1625 MLX5_GET64(query_vport_counter_out,
1626 out, received_eth_multicast.packets);
1627 vc->transmitted_eth_multicast.octets =
1628 MLX5_GET64(query_vport_counter_out,
1629 out, transmitted_eth_multicast.octets);
1630 vc->transmitted_eth_multicast.packets =
1631 MLX5_GET64(query_vport_counter_out,
1632 out, transmitted_eth_multicast.packets);
/*
 * mlx5_query_vport_system_image_guid() - port-type dispatcher for the
 * system-image GUID query.
 * @dev:            mlx5 core device.
 * @sys_image_guid: out parameter filled by the selected backend.
 *
 * IB ports use the HCA vport query; Ethernet ports use the NIC vport
 * query.  NOTE(review): the default case is elided in this excerpt.
 */
1639 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1640 u64 *sys_image_guid)
1642 switch (MLX5_CAP_GEN(dev, port_type)) {
1643 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1644 return mlx5_query_hca_vport_system_image_guid(dev,
1647 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1648 return mlx5_query_nic_vport_system_image_guid(dev,
1655 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
/*
 * mlx5_query_vport_node_guid() - port-type dispatcher for the node GUID
 * query (IB -> HCA vport query, Ethernet -> NIC vport query).
 * @dev:       mlx5 core device.
 * @node_guid: out parameter filled by the selected backend.
 */
1657 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1659 switch (MLX5_CAP_GEN(dev, port_type)) {
1660 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1661 return mlx5_query_hca_vport_node_guid(dev, node_guid);
1663 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1664 return mlx5_query_nic_vport_node_guid(dev, node_guid);
1670 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
/*
 * mlx5_query_vport_port_guid() - port-type dispatcher for the port GUID
 * query (IB -> HCA vport query, Ethernet -> NIC vport query).
 * @dev:       mlx5 core device.
 * @port_guid: out parameter filled by the selected backend.
 */
1672 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1674 switch (MLX5_CAP_GEN(dev, port_type)) {
1675 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1676 return mlx5_query_hca_vport_port_guid(dev, port_guid);
1678 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1679 return mlx5_query_nic_vport_port_guid(dev, port_guid);
1685 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
/*
 * mlx5_query_hca_vport_state() - read the administrative/link state of the
 * function's own HCA vport.
 * @dev:         mlx5 core device.
 * @vport_state: out parameter, filled from hca_vport_context.vport_state.
 *
 * Queries HCA vport context for port 1, vport 0 (the arguments to
 * mlx5_query_hca_vport_context below) and extracts the vport_state field.
 * NOTE(review): error handling, free and return are elided in this
 * excerpt.
 */
1687 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1690 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1693 out = mlx5_vzalloc(outlen);
1697 err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen); /* port 1, vport 0 */
1701 *vport_state = MLX5_GET(query_hca_vport_context_out, out,
1702 hca_vport_context.vport_state);
1708 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);