/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
/* Forward declaration; definition appears below after its first users. */
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen);
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 u16 vport, u32 *out, int outlen)
40 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
42 memset(in, 0, sizeof(in));
44 MLX5_SET(query_vport_state_in, in, opcode,
45 MLX5_CMD_OP_QUERY_VPORT_STATE);
46 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
47 MLX5_SET(query_vport_state_in, in, vport_number, vport);
49 MLX5_SET(query_vport_state_in, in, other_vport, 1);
51 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
53 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
58 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
60 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
62 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
64 return MLX5_GET(query_vport_state_out, out, state);
66 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
68 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
70 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
72 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
74 return MLX5_GET(query_vport_state_out, out, admin_state);
76 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
78 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
81 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
82 u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
85 memset(in, 0, sizeof(in));
87 MLX5_SET(modify_vport_state_in, in, opcode,
88 MLX5_CMD_OP_MODIFY_VPORT_STATE);
89 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
90 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
93 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
95 MLX5_SET(modify_vport_state_in, in, admin_state, state);
97 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
100 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
104 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
106 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
107 u32 *out, int outlen)
109 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
111 memset(in, 0, sizeof(in));
113 MLX5_SET(query_nic_vport_context_in, in, opcode,
114 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
116 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
118 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
120 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
123 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
127 case MLX5_INTERFACE_PROTOCOL_IB:
128 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
129 MLX5_QCOUNTER_SETS_NETDEV);
130 case MLX5_INTERFACE_PROTOCOL_ETH:
131 return MLX5_QCOUNTER_SETS_NETDEV;
133 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
138 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
139 int client_id, u16 *counter_set_id)
141 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
142 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
145 if (mdev->num_q_counter_allocated[client_id] >
146 mlx5_vport_max_q_counter_allocator(mdev, client_id))
149 memset(in, 0, sizeof(in));
150 memset(out, 0, sizeof(out));
152 MLX5_SET(alloc_q_counter_in, in, opcode,
153 MLX5_CMD_OP_ALLOC_Q_COUNTER);
155 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
159 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
162 mdev->num_q_counter_allocated[client_id]++;
167 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
168 int client_id, u16 counter_set_id)
170 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
171 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
174 if (mdev->num_q_counter_allocated[client_id] <= 0)
177 memset(in, 0, sizeof(in));
178 memset(out, 0, sizeof(out));
180 MLX5_SET(dealloc_q_counter_in, in, opcode,
181 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
182 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
185 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
188 mdev->num_q_counter_allocated[client_id]--;
193 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
199 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
201 memset(in, 0, sizeof(in));
203 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
204 MLX5_SET(query_q_counter_in, in, clear, reset);
205 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
207 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
211 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
213 u32 *out_of_rx_buffer)
215 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
218 memset(out, 0, sizeof(out));
220 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
226 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
231 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
235 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
239 out = mlx5_vzalloc(outlen);
243 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
244 nic_vport_context.permanent_address);
246 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
250 ether_addr_copy(addr, &out_addr[2]);
256 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
258 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
262 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
267 in = mlx5_vzalloc(inlen);
269 mlx5_core_warn(mdev, "failed to allocate inbox\n");
273 MLX5_SET(modify_nic_vport_context_in, in,
274 field_select.permanent_address, 1);
275 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
278 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
280 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
281 in, nic_vport_context);
282 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
285 ether_addr_copy(&perm_mac[2], addr);
287 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
293 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
295 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
296 u64 *system_image_guid)
299 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
302 out = mlx5_vzalloc(outlen);
306 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
310 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
311 nic_vport_context.system_image_guid);
316 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
318 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
321 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
324 out = mlx5_vzalloc(outlen);
328 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
332 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
333 nic_vport_context.node_guid);
339 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
341 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
345 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
348 out = mlx5_vzalloc(outlen);
352 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
356 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
357 nic_vport_context.port_guid);
364 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
368 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
371 out = mlx5_vzalloc(outlen);
375 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
379 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
380 nic_vport_context.qkey_violation_counter);
386 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
388 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
391 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
393 MLX5_SET(modify_nic_vport_context_in, in, opcode,
394 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
396 memset(out, 0, sizeof(out));
397 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
400 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
404 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
407 in = mlx5_vzalloc(inlen);
409 mlx5_core_warn(mdev, "failed to allocate inbox\n");
413 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
414 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
417 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
424 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
425 bool other_vport, u8 *addr)
428 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
429 + MLX5_ST_SZ_BYTES(mac_address_layout);
434 in = mlx5_vzalloc(inlen);
436 mlx5_core_warn(mdev, "failed to allocate inbox\n");
440 MLX5_SET(modify_nic_vport_context_in, in,
441 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
442 MLX5_SET(modify_nic_vport_context_in, in,
443 vport_number, vport);
444 MLX5_SET(modify_nic_vport_context_in, in,
445 other_vport, other_vport);
446 MLX5_SET(modify_nic_vport_context_in, in,
447 field_select.addresses_list, 1);
448 MLX5_SET(modify_nic_vport_context_in, in,
449 nic_vport_context.allowed_list_type,
450 MLX5_NIC_VPORT_LIST_TYPE_UC);
451 MLX5_SET(modify_nic_vport_context_in, in,
452 nic_vport_context.allowed_list_size, 1);
454 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
455 nic_vport_context.current_uc_mac_address);
456 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
458 ether_addr_copy(mac_ptr, addr);
460 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
466 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
468 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
469 u32 vport, u64 node_guid)
472 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
474 void *nic_vport_context;
478 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
480 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
483 in = mlx5_vzalloc(inlen);
485 mlx5_core_warn(mdev, "failed to allocate inbox\n");
489 MLX5_SET(modify_nic_vport_context_in, in,
490 field_select.node_guid, 1);
491 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
493 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
495 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
496 in, nic_vport_context);
497 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
499 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
505 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
507 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
508 u32 vport, u64 port_guid)
511 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
513 void *nic_vport_context;
517 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
519 if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
522 in = mlx5_vzalloc(inlen);
524 mlx5_core_warn(mdev, "failed to allocate inbox\n");
528 MLX5_SET(modify_nic_vport_context_in, in,
529 field_select.port_guid, 1);
530 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
532 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
534 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
535 in, nic_vport_context);
536 MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
538 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
544 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
546 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
547 u16 *vlan_list, int list_len)
551 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
552 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
554 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
556 if (list_len > max_list_size) {
557 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
558 list_len, max_list_size);
562 in = mlx5_vzalloc(inlen);
564 mlx5_core_warn(dev, "failed to allocate inbox\n");
568 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
570 MLX5_SET(modify_nic_vport_context_in, in,
572 MLX5_SET(modify_nic_vport_context_in, in,
573 field_select.addresses_list, 1);
575 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
577 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
578 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
579 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
581 for (i = 0; i < list_len; i++) {
582 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
583 current_uc_mac_address[i]);
584 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
587 err = mlx5_modify_nic_vport_context(dev, in, inlen);
592 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
594 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
595 u64 *addr_list, size_t addr_list_len)
598 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
599 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
602 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
604 if ((int)addr_list_len > max_list_sz) {
605 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
606 (int)addr_list_len, max_list_sz);
610 in = mlx5_vzalloc(inlen);
612 mlx5_core_warn(mdev, "failed to allocate inbox\n");
616 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
618 MLX5_SET(modify_nic_vport_context_in, in,
620 MLX5_SET(modify_nic_vport_context_in, in,
621 field_select.addresses_list, 1);
623 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
625 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
626 MLX5_NIC_VPORT_LIST_TYPE_MC);
627 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
629 for (i = 0; i < addr_list_len; i++) {
630 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
631 current_uc_mac_address[i]);
632 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
634 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
637 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
643 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
645 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
646 bool promisc_mc, bool promisc_uc,
649 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
650 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
653 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
655 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
657 MLX5_SET(modify_nic_vport_context_in, in,
659 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
661 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
663 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
665 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
667 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
669 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
671 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
673 enum mlx5_list_type list_type,
674 u8 addr_list[][ETH_ALEN],
677 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
686 req_list_size = *list_size;
688 max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
689 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
690 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
692 if (req_list_size > max_list_size) {
693 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
694 req_list_size, max_list_size);
695 req_list_size = max_list_size;
698 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
699 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
701 memset(in, 0, sizeof(in));
702 out = kzalloc(out_sz, GFP_KERNEL);
706 MLX5_SET(query_nic_vport_context_in, in, opcode,
707 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
708 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
709 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
712 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
714 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
718 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
720 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
723 *list_size = req_list_size;
724 for (i = 0; i < req_list_size; i++) {
725 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
727 current_uc_mac_address[i]) + 2;
728 ether_addr_copy(addr_list[i], mac_addr);
734 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
736 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
737 enum mlx5_list_type list_type,
738 u8 addr_list[][ETH_ALEN],
741 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
749 max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
750 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
751 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
753 if (list_size > max_list_size)
756 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
757 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
759 memset(out, 0, sizeof(out));
760 in = kzalloc(in_sz, GFP_KERNEL);
764 MLX5_SET(modify_nic_vport_context_in, in, opcode,
765 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
766 MLX5_SET(modify_nic_vport_context_in, in,
767 field_select.addresses_list, 1);
769 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
772 MLX5_SET(nic_vport_context, nic_vport_ctx,
773 allowed_list_type, list_type);
774 MLX5_SET(nic_vport_context, nic_vport_ctx,
775 allowed_list_size, list_size);
777 for (i = 0; i < list_size; i++) {
778 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
780 current_uc_mac_address[i]) + 2;
781 ether_addr_copy(curr_mac, addr_list[i]);
784 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
788 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
790 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
795 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
804 req_list_size = *size;
805 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
806 if (req_list_size > max_list_size) {
807 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
808 req_list_size, max_list_size);
809 req_list_size = max_list_size;
812 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
813 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
815 memset(in, 0, sizeof(in));
816 out = kzalloc(out_sz, GFP_KERNEL);
820 MLX5_SET(query_nic_vport_context_in, in, opcode,
821 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
822 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
823 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
824 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
827 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
829 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
833 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
835 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
838 *size = req_list_size;
839 for (i = 0; i < req_list_size; i++) {
840 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
842 current_uc_mac_address[i]);
843 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
849 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
851 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
855 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
863 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
865 if (list_size > max_list_size)
868 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
869 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
871 memset(out, 0, sizeof(out));
872 in = kzalloc(in_sz, GFP_KERNEL);
876 MLX5_SET(modify_nic_vport_context_in, in, opcode,
877 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
878 MLX5_SET(modify_nic_vport_context_in, in,
879 field_select.addresses_list, 1);
881 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
884 MLX5_SET(nic_vport_context, nic_vport_ctx,
885 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
886 MLX5_SET(nic_vport_context, nic_vport_ctx,
887 allowed_list_size, list_size);
889 for (i = 0; i < list_size; i++) {
890 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
892 current_uc_mac_address[i]);
893 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
896 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
900 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
902 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
905 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
908 out = kzalloc(outlen, GFP_KERNEL);
912 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
916 *enable = MLX5_GET(query_nic_vport_context_out, out,
917 nic_vport_context.roce_en);
923 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
925 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
929 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
933 in = mlx5_vzalloc(inlen);
935 mlx5_core_warn(mdev, "failed to allocate inbox\n");
939 MLX5_SET(modify_nic_vport_context_in, in,
940 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
941 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
942 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
943 MLX5_SET(modify_nic_vport_context_in, in,
944 field_select.permanent_address, 1);
945 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
946 nic_vport_context.permanent_address.mac_addr_47_32);
947 ether_addr_copy(mac_ptr, addr);
949 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
955 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Enable RoCE on the local NIC vport. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
/* Disable RoCE on the local NIC vport. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
969 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
970 int vf, u8 port_num, void *out,
973 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
974 int is_group_manager;
978 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
979 in = mlx5_vzalloc(in_sz);
985 MLX5_SET(query_vport_counter_in, in, opcode,
986 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
988 if (is_group_manager) {
989 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
990 MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
996 if (MLX5_CAP_GEN(dev, num_ports) == 2)
997 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
999 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1004 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1006 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
1007 u8 port_num, u8 vport_num, u32 *out,
1010 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
1011 int is_group_manager;
1013 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
1015 memset(in, 0, sizeof(in));
1017 MLX5_SET(query_hca_vport_context_in, in, opcode,
1018 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
1021 if (is_group_manager) {
1022 MLX5_SET(query_hca_vport_context_in, in, other_vport,
1024 MLX5_SET(query_hca_vport_context_in, in, vport_number,
1031 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
1032 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
1034 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
1037 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1038 u64 *system_image_guid)
1041 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1044 out = mlx5_vzalloc(outlen);
1048 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1052 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1053 hca_vport_context.system_image_guid);
1059 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1061 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1064 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1067 out = mlx5_vzalloc(outlen);
1071 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1075 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1076 hca_vport_context.node_guid);
1082 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1084 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1088 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1091 out = mlx5_vzalloc(outlen);
1095 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1099 *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1100 hca_vport_context.port_guid);
1107 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1108 u16 vport_num, u16 gid_index, union ib_gid *gid)
1110 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1111 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1112 int is_group_manager;
1120 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1121 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
1123 if (gid_index > tbsz && gid_index != 0xffff)
1126 if (gid_index == 0xffff)
1131 out_sz += nout * sizeof(*gid);
1133 in = mlx5_vzalloc(in_sz);
1134 out = mlx5_vzalloc(out_sz);
1140 MLX5_SET(query_hca_vport_gid_in, in, opcode,
1141 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1143 if (is_group_manager) {
1144 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1146 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1153 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1155 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1156 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1158 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1162 err = mlx5_cmd_status_to_err_v2(out);
1166 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1167 gid->global.subnet_prefix = tmp->global.subnet_prefix;
1168 gid->global.interface_id = tmp->global.interface_id;
1175 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1177 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1178 u8 port_num, u16 vf_num, u16 pkey_index,
1181 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1182 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1183 int is_group_manager;
1192 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1194 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1195 if (pkey_index > tbsz && pkey_index != 0xffff)
1198 if (pkey_index == 0xffff)
1203 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1205 in = kzalloc(in_sz, GFP_KERNEL);
1206 out = kzalloc(out_sz, GFP_KERNEL);
1208 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1209 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1211 if (is_group_manager) {
1212 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1214 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1220 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1222 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1223 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1225 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1229 err = mlx5_cmd_status_to_err_v2(out);
1233 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1234 for (i = 0; i < nout; i++, pkey++,
1235 pkarr += MLX5_ST_SZ_BYTES(pkey))
1236 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1243 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1245 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1249 u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1252 out = mlx5_vzalloc(outlen);
1256 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1260 *min_header = MLX5_GET(query_hca_vport_context_out, out,
1261 hca_vport_context.min_wqe_inline_mode);
1268 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1269 u16 vport, void *in, int inlen)
1271 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
1274 memset(out, 0, sizeof(out));
1276 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1278 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1280 MLX5_SET(modify_esw_vport_context_in, in, opcode,
1281 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1283 err = mlx5_cmd_exec_check_status(mdev, in, inlen,
1286 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
1291 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1292 u8 insert_mode, u8 strip_mode,
1293 u16 vlan, u8 cfi, u8 pcp)
1295 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1297 memset(in, 0, sizeof(in));
1299 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1300 MLX5_SET(modify_esw_vport_context_in, in,
1301 esw_vport_context.cvlan_cfi, cfi);
1302 MLX5_SET(modify_esw_vport_context_in, in,
1303 esw_vport_context.cvlan_pcp, pcp);
1304 MLX5_SET(modify_esw_vport_context_in, in,
1305 esw_vport_context.cvlan_id, vlan);
1308 MLX5_SET(modify_esw_vport_context_in, in,
1309 esw_vport_context.vport_cvlan_insert, insert_mode);
1311 MLX5_SET(modify_esw_vport_context_in, in,
1312 esw_vport_context.vport_cvlan_strip, strip_mode);
1314 MLX5_SET(modify_esw_vport_context_in, in, field_select,
1315 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1316 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1318 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1320 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1322 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1325 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1328 out = mlx5_vzalloc(outlen);
1332 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1336 *mtu = MLX5_GET(query_nic_vport_context_out, out,
1337 nic_vport_context.mtu);
1343 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1345 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1348 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1351 in = mlx5_vzalloc(inlen);
1355 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1356 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1358 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1363 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1365 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1369 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1372 out = mlx5_vzalloc(outlen);
1376 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1380 *min_header = MLX5_GET(query_nic_vport_context_out, out,
1381 nic_vport_context.min_wqe_inline_mode);
1388 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1389 u8 vport, int min_header)
1392 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1395 in = mlx5_vzalloc(inlen);
1399 MLX5_SET(modify_nic_vport_context_in, in,
1400 field_select.min_wqe_inline_mode, 1);
1401 MLX5_SET(modify_nic_vport_context_in, in,
1402 nic_vport_context.min_wqe_inline_mode, min_header);
1403 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1404 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1406 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1411 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1413 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1415 switch (MLX5_CAP_GEN(dev, port_type)) {
1416 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1417 return mlx5_query_hca_min_wqe_header(dev, min_header);
1419 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1420 return mlx5_query_vport_min_wqe_header(dev, min_header);
1426 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1428 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1435 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1438 out = kzalloc(outlen, GFP_KERNEL);
1442 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1446 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1447 nic_vport_context.promisc_uc);
1448 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1449 nic_vport_context.promisc_mc);
1450 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1451 nic_vport_context.promisc_all);
1457 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1459 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1465 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1468 in = mlx5_vzalloc(inlen);
1470 mlx5_core_err(mdev, "failed to allocate inbox\n");
1474 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1475 MLX5_SET(modify_nic_vport_context_in, in,
1476 nic_vport_context.promisc_uc, promisc_uc);
1477 MLX5_SET(modify_nic_vport_context_in, in,
1478 nic_vport_context.promisc_mc, promisc_mc);
1479 MLX5_SET(modify_nic_vport_context_in, in,
1480 nic_vport_context.promisc_all, promisc_all);
1482 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1486 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1488 int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
1489 enum mlx5_local_lb_selection selection,
1493 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1496 in = mlx5_vzalloc(inlen);
1498 mlx5_core_warn(mdev, "failed to allocate inbox\n");
1502 MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);
1504 if (selection == MLX5_LOCAL_MC_LB) {
1505 MLX5_SET(modify_nic_vport_context_in, in,
1506 field_select.disable_mc_local_lb, 1);
1507 MLX5_SET(modify_nic_vport_context_in, in,
1508 nic_vport_context.disable_mc_local_lb,
1511 MLX5_SET(modify_nic_vport_context_in, in,
1512 field_select.disable_uc_local_lb, 1);
1513 MLX5_SET(modify_nic_vport_context_in, in,
1514 nic_vport_context.disable_uc_local_lb,
1518 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1523 EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);
1525 int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
1526 enum mlx5_local_lb_selection selection,
1530 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1533 out = kzalloc(outlen, GFP_KERNEL);
1537 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1541 if (selection == MLX5_LOCAL_MC_LB)
1542 *value = MLX5_GET(query_nic_vport_context_out, out,
1543 nic_vport_context.disable_mc_local_lb);
1545 *value = MLX5_GET(query_nic_vport_context_out, out,
1546 nic_vport_context.disable_uc_local_lb);
1552 EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
1554 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1555 u8 port_num, u16 vport_num,
1556 void *out, int out_size)
1558 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1559 int is_group_manager;
1563 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1565 in = mlx5_vzalloc(in_sz);
1569 MLX5_SET(query_vport_counter_in, in, opcode,
1570 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1572 if (is_group_manager) {
1573 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1574 MLX5_SET(query_vport_counter_in, in, vport_number,
1581 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1582 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1584 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
1587 err = mlx5_cmd_status_to_err_v2(out);
1595 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1597 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1598 struct mlx5_vport_counters *vc)
1600 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1604 out = mlx5_vzalloc(out_sz);
1608 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1612 vc->received_errors.packets =
1613 MLX5_GET64(query_vport_counter_out,
1614 out, received_errors.packets);
1615 vc->received_errors.octets =
1616 MLX5_GET64(query_vport_counter_out,
1617 out, received_errors.octets);
1618 vc->transmit_errors.packets =
1619 MLX5_GET64(query_vport_counter_out,
1620 out, transmit_errors.packets);
1621 vc->transmit_errors.octets =
1622 MLX5_GET64(query_vport_counter_out,
1623 out, transmit_errors.octets);
1624 vc->received_ib_unicast.packets =
1625 MLX5_GET64(query_vport_counter_out,
1626 out, received_ib_unicast.packets);
1627 vc->received_ib_unicast.octets =
1628 MLX5_GET64(query_vport_counter_out,
1629 out, received_ib_unicast.octets);
1630 vc->transmitted_ib_unicast.packets =
1631 MLX5_GET64(query_vport_counter_out,
1632 out, transmitted_ib_unicast.packets);
1633 vc->transmitted_ib_unicast.octets =
1634 MLX5_GET64(query_vport_counter_out,
1635 out, transmitted_ib_unicast.octets);
1636 vc->received_ib_multicast.packets =
1637 MLX5_GET64(query_vport_counter_out,
1638 out, received_ib_multicast.packets);
1639 vc->received_ib_multicast.octets =
1640 MLX5_GET64(query_vport_counter_out,
1641 out, received_ib_multicast.octets);
1642 vc->transmitted_ib_multicast.packets =
1643 MLX5_GET64(query_vport_counter_out,
1644 out, transmitted_ib_multicast.packets);
1645 vc->transmitted_ib_multicast.octets =
1646 MLX5_GET64(query_vport_counter_out,
1647 out, transmitted_ib_multicast.octets);
1648 vc->received_eth_broadcast.packets =
1649 MLX5_GET64(query_vport_counter_out,
1650 out, received_eth_broadcast.packets);
1651 vc->received_eth_broadcast.octets =
1652 MLX5_GET64(query_vport_counter_out,
1653 out, received_eth_broadcast.octets);
1654 vc->transmitted_eth_broadcast.packets =
1655 MLX5_GET64(query_vport_counter_out,
1656 out, transmitted_eth_broadcast.packets);
1657 vc->transmitted_eth_broadcast.octets =
1658 MLX5_GET64(query_vport_counter_out,
1659 out, transmitted_eth_broadcast.octets);
1660 vc->received_eth_unicast.octets =
1661 MLX5_GET64(query_vport_counter_out,
1662 out, received_eth_unicast.octets);
1663 vc->received_eth_unicast.packets =
1664 MLX5_GET64(query_vport_counter_out,
1665 out, received_eth_unicast.packets);
1666 vc->transmitted_eth_unicast.octets =
1667 MLX5_GET64(query_vport_counter_out,
1668 out, transmitted_eth_unicast.octets);
1669 vc->transmitted_eth_unicast.packets =
1670 MLX5_GET64(query_vport_counter_out,
1671 out, transmitted_eth_unicast.packets);
1672 vc->received_eth_multicast.octets =
1673 MLX5_GET64(query_vport_counter_out,
1674 out, received_eth_multicast.octets);
1675 vc->received_eth_multicast.packets =
1676 MLX5_GET64(query_vport_counter_out,
1677 out, received_eth_multicast.packets);
1678 vc->transmitted_eth_multicast.octets =
1679 MLX5_GET64(query_vport_counter_out,
1680 out, transmitted_eth_multicast.octets);
1681 vc->transmitted_eth_multicast.packets =
1682 MLX5_GET64(query_vport_counter_out,
1683 out, transmitted_eth_multicast.packets);
1690 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1691 u64 *sys_image_guid)
1693 switch (MLX5_CAP_GEN(dev, port_type)) {
1694 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1695 return mlx5_query_hca_vport_system_image_guid(dev,
1698 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1699 return mlx5_query_nic_vport_system_image_guid(dev,
1706 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1708 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1710 switch (MLX5_CAP_GEN(dev, port_type)) {
1711 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1712 return mlx5_query_hca_vport_node_guid(dev, node_guid);
1714 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1715 return mlx5_query_nic_vport_node_guid(dev, node_guid);
1721 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1723 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1725 switch (MLX5_CAP_GEN(dev, port_type)) {
1726 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1727 return mlx5_query_hca_vport_port_guid(dev, port_guid);
1729 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1730 return mlx5_query_nic_vport_port_guid(dev, port_guid);
1736 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1738 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1741 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1744 out = mlx5_vzalloc(outlen);
1748 err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1752 *vport_state = MLX5_GET(query_hca_vport_context_out, out,
1753 hca_vport_context.vport_state);
1759 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1761 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
1762 u8 port_num, void *out, size_t sz)
1767 in = mlx5_vzalloc(sz);
1773 MLX5_SET(ppcnt_reg, in, local_port, port_num);
1775 MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
1776 err = mlx5_core_access_reg(dev, in, sz, out,
1777 sz, MLX5_REG_PPCNT, 0, 0);