2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
/*
 * NOTE(review): this chunk reads as a lossy extraction of the mlx5 core
 * vport code (mlx5_vport.c): braces, local declarations, `return` lines
 * and some statements are missing, and the original file's line numbers
 * have been fused onto the start of each line.  Comments below describe
 * apparent intent only — verify everything against the pristine source.
 */
/* Forward declaration; definition appears further down in this file. */
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
/*
 * Issue QUERY_VPORT_STATE for `vport` and leave the raw FW reply in `out`.
 * `other_vport` is set when addressing a vport other than our own — the
 * guard line for that appears to be missing from this excerpt.
 */
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 u16 vport, u32 *out, int outlen)
40 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
42 memset(in, 0, sizeof(in));
44 MLX5_SET(query_vport_state_in, in, opcode,
45 MLX5_CMD_OP_QUERY_VPORT_STATE);
46 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
47 MLX5_SET(query_vport_state_in, in, vport_number, vport);
49 MLX5_SET(query_vport_state_in, in, other_vport, 1);
51 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
53 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
/* Return the operational `state` field of the vport. */
58 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
60 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
62 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
64 return MLX5_GET(query_vport_state_out, out, state);
66 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
/* Same query, but return the administrative state instead. */
68 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
70 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
72 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
74 return MLX5_GET(query_vport_state_out, out, admin_state);
76 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
/* Set the admin state of `vport` via MODIFY_VPORT_STATE; warns on failure. */
78 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
81 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
82 u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
85 memset(in, 0, sizeof(in));
87 MLX5_SET(modify_vport_state_in, in, opcode,
88 MLX5_CMD_OP_MODIFY_VPORT_STATE);
89 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
90 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
93 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
95 MLX5_SET(modify_vport_state_in, in, admin_state, state);
97 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
100 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
104 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
/* Read the NIC vport context of `vport` into `out` (raw FW layout). */
106 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
107 u32 *out, int outlen)
109 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
111 memset(in, 0, sizeof(in));
113 MLX5_SET(query_nic_vport_context_in, in, opcode,
114 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
116 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
118 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
120 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
/*
 * Per-client budget of queue counter sets: ETH gets a fixed number,
 * IB gets whatever remains of max_qp_cnt.  Unknown clients are warned.
 */
123 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
127 case MLX5_INTERFACE_PROTOCOL_IB:
128 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
129 MLX5_QCOUNTER_SETS_NETDEV);
130 case MLX5_INTERFACE_PROTOCOL_ETH:
131 return MLX5_QCOUNTER_SETS_NETDEV;
133 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
/*
 * Allocate a HW queue counter set for `client_id`, returning its id in
 * *counter_set_id and bumping the per-client bookkeeping count.
 * NOTE(review): the budget check uses `>` rather than `>=` — confirm
 * against the pristine source whether the limit is inclusive.
 */
138 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
139 int client_id, u16 *counter_set_id)
141 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
142 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
145 if (mdev->num_q_counter_allocated[client_id] >
146 mlx5_vport_max_q_counter_allocator(mdev, client_id))
149 memset(in, 0, sizeof(in));
150 memset(out, 0, sizeof(out));
152 MLX5_SET(alloc_q_counter_in, in, opcode,
153 MLX5_CMD_OP_ALLOC_Q_COUNTER);
155 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
159 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
162 mdev->num_q_counter_allocated[client_id]++;
/* Release a previously allocated counter set and drop the client's count. */
167 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
168 int client_id, u16 counter_set_id)
170 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
171 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
174 if (mdev->num_q_counter_allocated[client_id] <= 0)
177 memset(in, 0, sizeof(in));
178 memset(out, 0, sizeof(out));
180 MLX5_SET(dealloc_q_counter_in, in, opcode,
181 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
182 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
185 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
188 mdev->num_q_counter_allocated[client_id]--;
/* Read (and optionally clear, via `reset`) one queue counter set. */
193 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
199 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
201 memset(in, 0, sizeof(in));
203 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
204 MLX5_SET(query_q_counter_in, in, clear, reset);
205 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
207 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
/* Convenience wrapper: extract the out-of-buffer drop counter. */
211 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
213 u32 *out_of_rx_buffer)
215 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
218 memset(out, 0, sizeof(out));
220 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
226 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
/*
 * Fetch the permanent MAC of `vport`.  The MAC bytes start at offset 2
 * of the permanent_address field (the layout carries two pad bytes).
 */
231 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
235 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
239 out = mlx5_vzalloc(outlen);
243 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
244 nic_vport_context.permanent_address);
246 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
250 ether_addr_copy(addr, &out_addr[2]);
256 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
/* Program a new permanent MAC for `vport` (again skipping the 2 pad bytes). */
258 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
262 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
267 in = mlx5_vzalloc(inlen);
269 mlx5_core_warn(mdev, "failed to allocate inbox\n");
273 MLX5_SET(modify_nic_vport_context_in, in,
274 field_select.permanent_address, 1);
275 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
278 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
280 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
281 in, nic_vport_context);
282 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
285 ether_addr_copy(&perm_mac[2], addr);
287 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
293 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
/* Query vport 0's NIC context and return system_image_guid. */
295 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
296 u64 *system_image_guid)
299 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
302 out = mlx5_vzalloc(outlen);
306 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
310 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
311 nic_vport_context.system_image_guid);
316 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
/* Query vport 0's NIC context and return node_guid. */
318 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
321 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
324 out = mlx5_vzalloc(outlen);
328 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
332 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
333 nic_vport_context.node_guid);
339 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
/* Query vport 0's NIC context and return port_guid (file-local helper). */
341 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
345 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
348 out = mlx5_vzalloc(outlen);
352 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
356 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
357 nic_vport_context.port_guid);
/* Read the Q_Key violation counter from vport 0's NIC context. */
364 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
368 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
371 out = mlx5_vzalloc(outlen);
375 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
379 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
380 nic_vport_context.qkey_violation_counter);
386 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
/* Stamp the MODIFY_NIC_VPORT_CONTEXT opcode into `in` and execute it. */
388 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
391 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
393 MLX5_SET(modify_nic_vport_context_in, in, opcode,
394 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
396 memset(out, 0, sizeof(out));
397 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
/* Toggle the roce_en bit in our own NIC vport context. */
400 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
404 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
407 in = mlx5_vzalloc(inlen);
409 mlx5_core_warn(mdev, "failed to allocate inbox\n");
413 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
414 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
417 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
/*
 * Replace the current unicast MAC list of `vport` with the single
 * address `addr` (UC list type, allowed_list_size == 1).
 */
424 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
425 bool other_vport, u8 *addr)
428 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
429 + MLX5_ST_SZ_BYTES(mac_address_layout);
434 in = mlx5_vzalloc(inlen);
436 mlx5_core_warn(mdev, "failed to allocate inbox\n");
440 MLX5_SET(modify_nic_vport_context_in, in,
441 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
442 MLX5_SET(modify_nic_vport_context_in, in,
443 vport_number, vport);
444 MLX5_SET(modify_nic_vport_context_in, in,
445 other_vport, other_vport);
446 MLX5_SET(modify_nic_vport_context_in, in,
447 field_select.addresses_list, 1);
448 MLX5_SET(modify_nic_vport_context_in, in,
449 nic_vport_context.allowed_list_type,
450 MLX5_NIC_VPORT_LIST_TYPE_UC);
451 MLX5_SET(modify_nic_vport_context_in, in,
452 nic_vport_context.allowed_list_size, 1);
454 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
455 nic_vport_context.current_uc_mac_address);
456 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
458 ether_addr_copy(mac_ptr, addr);
460 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
466 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
/*
 * Set node_guid of another vport; gated on the vport_group_manager
 * general cap and the nic_vport_node_guid_modify e-switch cap.
 */
468 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
469 u32 vport, u64 node_guid)
472 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
474 void *nic_vport_context;
478 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
480 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
483 in = mlx5_vzalloc(inlen);
485 mlx5_core_warn(mdev, "failed to allocate inbox\n");
489 MLX5_SET(modify_nic_vport_context_in, in,
490 field_select.node_guid, 1);
491 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
493 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
495 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
496 in, nic_vport_context);
497 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
499 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
505 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
/* Same pattern as above, for port_guid (nic_vport_port_guid_modify cap). */
507 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
508 u32 vport, u64 port_guid)
511 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
513 void *nic_vport_context;
517 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
519 if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
522 in = mlx5_vzalloc(inlen);
524 mlx5_core_warn(mdev, "failed to allocate inbox\n");
528 MLX5_SET(modify_nic_vport_context_in, in,
529 field_select.port_guid, 1);
530 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
532 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
534 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
535 in, nic_vport_context);
536 MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
538 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
544 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
/*
 * Install an allowed-VLAN list on `vport`.  VLAN entries are written
 * into the current_uc_mac_address[] slots — apparently the FW layout
 * reuses that array for every allowed-list type (TODO confirm against
 * the PRM); the query/modify VLAN helpers below do the same.
 */
546 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
547 u16 *vlan_list, int list_len)
551 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
552 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
554 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
556 if (list_len > max_list_size) {
557 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
558 list_len, max_list_size);
562 in = mlx5_vzalloc(inlen);
564 mlx5_core_warn(dev, "failed to allocate inbox\n");
568 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
570 MLX5_SET(modify_nic_vport_context_in, in,
572 MLX5_SET(modify_nic_vport_context_in, in,
573 field_select.addresses_list, 1);
575 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
577 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
578 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
579 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
581 for (i = 0; i < list_len; i++) {
582 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
583 current_uc_mac_address[i]);
584 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
587 err = mlx5_modify_nic_vport_context(dev, in, inlen);
592 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
/*
 * Install a multicast MAC list on `vport`; each addr_list entry is a
 * packed u64 whose MAC bytes are copied via the mac_address_layout slot.
 */
594 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
595 u64 *addr_list, size_t addr_list_len)
598 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
599 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
602 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
604 if ((int)addr_list_len > max_list_sz) {
605 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
606 (int)addr_list_len, max_list_sz);
610 in = mlx5_vzalloc(inlen);
612 mlx5_core_warn(mdev, "failed to allocate inbox\n");
616 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
618 MLX5_SET(modify_nic_vport_context_in, in,
620 MLX5_SET(modify_nic_vport_context_in, in,
621 field_select.addresses_list, 1);
623 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
625 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
626 MLX5_NIC_VPORT_LIST_TYPE_MC);
627 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
629 for (i = 0; i < addr_list_len; i++) {
630 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
631 current_uc_mac_address[i]);
632 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
634 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
637 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
643 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
/*
 * Set the three promiscuous bits (UC/MC/all) of `vport`'s NIC context.
 * Stack-allocated inbox.  The three promisc MLX5_SETs are presumably
 * guarded by the boolean parameters in the original — the guard lines
 * are missing from this excerpt; verify before relying on it.
 */
645 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
646 bool promisc_mc, bool promisc_uc,
649 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
650 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
653 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
655 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
657 MLX5_SET(modify_nic_vport_context_in, in,
659 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
661 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
663 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
665 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
667 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
669 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
/*
 * Read `vport`'s allowed-address list of `list_type` into addr_list[];
 * *list_size is in/out (capacity in, entries returned out).
 * NOTE(review): out_sz is sized from modify_nic_vport_context_in even
 * though the reply is a query_nic_vport_context_out — looks like a
 * copy/paste inconsistency; confirm against the pristine source.
 */
671 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
673 enum mlx5_list_type list_type,
674 u8 addr_list[][ETH_ALEN],
677 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
686 req_list_size = *list_size;
688 max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
689 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
690 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
692 if (req_list_size > max_list_size) {
693 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
694 req_list_size, max_list_size);
695 req_list_size = max_list_size;
698 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
699 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
701 memset(in, 0, sizeof(in));
702 out = kzalloc(out_sz, GFP_KERNEL);
706 MLX5_SET(query_nic_vport_context_in, in, opcode,
707 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
708 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
709 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
712 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
714 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
718 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
720 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
723 *list_size = req_list_size;
724 for (i = 0; i < req_list_size; i++) {
725 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
727 current_uc_mac_address[i]) + 2;
728 ether_addr_copy(addr_list[i], mac_addr);
734 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
/*
 * Program an allowed-address list of `list_type` with `list_size`
 * entries; each MAC lands at offset 2 of its mac_address_layout slot.
 * NOTE(review): the limit here uses MLX5_CAP_GEN while the query above
 * uses MLX5_CAP_GEN_MAX — flagged for consistency review.
 */
736 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
737 enum mlx5_list_type list_type,
738 u8 addr_list[][ETH_ALEN],
741 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
749 max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
750 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
751 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
753 if (list_size > max_list_size)
756 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
757 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
759 memset(out, 0, sizeof(out));
760 in = kzalloc(in_sz, GFP_KERNEL);
764 MLX5_SET(modify_nic_vport_context_in, in, opcode,
765 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
766 MLX5_SET(modify_nic_vport_context_in, in,
767 field_select.addresses_list, 1);
769 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
772 MLX5_SET(nic_vport_context, nic_vport_ctx,
773 allowed_list_type, list_type);
774 MLX5_SET(nic_vport_context, nic_vport_ctx,
775 allowed_list_size, list_size);
777 for (i = 0; i < list_size; i++) {
778 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
780 current_uc_mac_address[i]) + 2;
781 ether_addr_copy(curr_mac, addr_list[i]);
784 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
788 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
/*
 * Read `vport`'s allowed-VLAN list into vlans[]; *size is in/out like
 * the MAC-list query above.  Same out_sz inconsistency (modify-in size
 * used to allocate a query-out buffer) — confirm before changing.
 */
790 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
795 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
804 req_list_size = *size;
805 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
806 if (req_list_size > max_list_size) {
807 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
808 req_list_size, max_list_size);
809 req_list_size = max_list_size;
812 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
813 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
815 memset(in, 0, sizeof(in));
816 out = kzalloc(out_sz, GFP_KERNEL);
820 MLX5_SET(query_nic_vport_context_in, in, opcode,
821 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
822 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
823 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
824 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
827 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
829 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
833 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
835 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
838 *size = req_list_size;
839 for (i = 0; i < req_list_size; i++) {
840 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
842 current_uc_mac_address[i]);
843 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
849 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
/*
 * Program an allowed-VLAN list.  No vport_number is set in this
 * excerpt — presumably it targets our own vport; verify upstream.
 */
851 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
855 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
863 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
865 if (list_size > max_list_size)
868 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
869 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
871 memset(out, 0, sizeof(out));
872 in = kzalloc(in_sz, GFP_KERNEL);
876 MLX5_SET(modify_nic_vport_context_in, in, opcode,
877 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
878 MLX5_SET(modify_nic_vport_context_in, in,
879 field_select.addresses_list, 1);
881 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
884 MLX5_SET(nic_vport_context, nic_vport_ctx,
885 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
886 MLX5_SET(nic_vport_context, nic_vport_ctx,
887 allowed_list_size, list_size);
889 for (i = 0; i < list_size; i++) {
890 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
892 current_uc_mac_address[i]);
893 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
896 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
900 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
/* Return the roce_en bit of our own NIC vport context in *enable. */
902 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
905 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
908 out = kzalloc(outlen, GFP_KERNEL);
912 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
916 *enable = MLX5_GET(query_nic_vport_context_out, out,
917 nic_vport_context.roce_en);
923 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
/* Program the permanent MAC of another vport (other_vport forced to 1). */
925 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
929 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
933 in = mlx5_vzalloc(inlen);
935 mlx5_core_warn(mdev, "failed to allocate inbox\n");
939 MLX5_SET(modify_nic_vport_context_in, in,
940 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
941 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
942 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
943 MLX5_SET(modify_nic_vport_context_in, in,
944 field_select.permanent_address, 1);
945 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
946 nic_vport_context.permanent_address.mac_addr_47_32);
947 ether_addr_copy(mac_ptr, addr);
949 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
955 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Thin public wrappers over mlx5_nic_vport_enable_disable_roce(). */
957 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
959 return mlx5_nic_vport_enable_disable_roce(mdev, 1);
961 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
963 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
965 return mlx5_nic_vport_enable_disable_roce(mdev, 0);
967 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
/*
 * QUERY_HCA_VPORT_CONTEXT: only group managers may address another
 * vport; port_num is written only on dual-port devices.
 */
969 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
970 u8 port_num, u8 vport_num, u32 *out,
973 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
974 int is_group_manager;
976 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
978 memset(in, 0, sizeof(in));
980 MLX5_SET(query_hca_vport_context_in, in, opcode,
981 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
984 if (is_group_manager) {
985 MLX5_SET(query_hca_vport_context_in, in, other_vport,
987 MLX5_SET(query_hca_vport_context_in, in, vport_number,
994 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
995 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num)
997 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
/* HCA-context counterpart of the NIC system_image_guid query (port 1, vport 0). */
1000 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1001 u64 *system_image_guid)
1004 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1007 out = mlx5_vzalloc(outlen);
1011 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1015 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1016 hca_vport_context.system_image_guid);
1022 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
/* node_guid from the HCA vport context (port 1, vport 0). */
1024 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1027 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1030 out = mlx5_vzalloc(outlen);
1034 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1038 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1039 hca_vport_context.node_guid);
1045 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
/* port_guid from the HCA vport context (file-local helper). */
1047 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1051 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1054 out = mlx5_vzalloc(outlen);
1058 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1062 *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1063 hca_vport_context.port_guid);
/*
 * Fetch one GID of a vport (or, for gid_index == 0xffff, apparently the
 * whole table) and copy its two 64-bit halves to *gid.
 * NOTE(review): the bounds check uses `gid_index > tbsz`, which lets
 * gid_index == tbsz through even though valid indices should be
 * 0..tbsz-1 — confirm intended semantics against the pristine source.
 */
1070 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1071 u16 vport_num, u16 gid_index, union ib_gid *gid)
1073 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1074 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1075 int is_group_manager;
1083 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1084 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
1086 if (gid_index > tbsz && gid_index != 0xffff)
1089 if (gid_index == 0xffff)
1094 out_sz += nout * sizeof(*gid);
1096 in = mlx5_vzalloc(in_sz);
1097 out = mlx5_vzalloc(out_sz);
1103 MLX5_SET(query_hca_vport_gid_in, in, opcode,
1104 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1106 if (is_group_manager) {
1107 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1109 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1116 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1118 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1119 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1121 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1125 err = mlx5_cmd_status_to_err_v2(out);
1129 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1130 gid->global.subnet_prefix = tmp->global.subnet_prefix;
1131 gid->global.interface_id = tmp->global.interface_id;
1138 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
/*
 * Fetch one P_Key (or all of them for pkey_index == 0xffff) of a vport
 * into pkey[], using raw mlx5_cmd_exec plus an explicit status check.
 * NOTE(review): no NULL checks on the kzalloc results are visible in
 * this excerpt — the guard lines may simply be missing; the same `>`
 * bounds-check question as the GID query applies to pkey_index.
 */
1140 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1141 u8 port_num, u16 vf_num, u16 pkey_index,
1144 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1145 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1146 int is_group_manager;
1155 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1157 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1158 if (pkey_index > tbsz && pkey_index != 0xffff)
1161 if (pkey_index == 0xffff)
1166 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1168 in = kzalloc(in_sz, GFP_KERNEL);
1169 out = kzalloc(out_sz, GFP_KERNEL);
1171 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1172 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1174 if (is_group_manager) {
1175 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1177 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1183 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1185 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1186 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1188 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1192 err = mlx5_cmd_status_to_err_v2(out);
1196 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1197 for (i = 0; i < nout; i++, pkey++,
1198 pkarr += MLX5_ST_SZ_BYTES(pkey))
1199 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1206 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
/* Read min_wqe_inline_mode from the HCA vport context (IB path). */
1208 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1212 u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1215 out = mlx5_vzalloc(outlen);
1219 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1223 *min_header = MLX5_GET(query_hca_vport_context_out, out,
1224 hca_vport_context.min_wqe_inline_mode);
/* Stamp vport/opcode into a MODIFY_ESW_VPORT_CONTEXT inbox and run it. */
1231 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1232 u16 vport, void *in, int inlen)
1234 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
1237 memset(out, 0, sizeof(out));
1239 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1241 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1243 MLX5_SET(modify_esw_vport_context_in, in, opcode,
1244 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1246 err = mlx5_cmd_exec_check_status(mdev, in, inlen,
1249 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
/*
 * Configure e-switch CVLAN insert/strip for `vport`.  The VLAN id, cfi
 * and pcp fields are written only when insertion is actually enabled.
 */
1254 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1255 u8 insert_mode, u8 strip_mode,
1256 u16 vlan, u8 cfi, u8 pcp)
1258 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1260 memset(in, 0, sizeof(in));
1262 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1263 MLX5_SET(modify_esw_vport_context_in, in,
1264 esw_vport_context.cvlan_cfi, cfi);
1265 MLX5_SET(modify_esw_vport_context_in, in,
1266 esw_vport_context.cvlan_pcp, pcp);
1267 MLX5_SET(modify_esw_vport_context_in, in,
1268 esw_vport_context.cvlan_id, vlan);
1271 MLX5_SET(modify_esw_vport_context_in, in,
1272 esw_vport_context.vport_cvlan_insert, insert_mode);
1274 MLX5_SET(modify_esw_vport_context_in, in,
1275 esw_vport_context.vport_cvlan_strip, strip_mode);
1277 MLX5_SET(modify_esw_vport_context_in, in, field_select,
1278 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1279 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1281 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1283 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
/* Read the MTU stored in our own NIC vport context. */
1285 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1288 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1291 out = mlx5_vzalloc(outlen);
1295 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1299 *mtu = MLX5_GET(query_nic_vport_context_out, out,
1300 nic_vport_context.mtu);
1306 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
/* Write a new MTU into our own NIC vport context. */
1308 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1311 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1314 in = mlx5_vzalloc(inlen);
1318 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1319 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1321 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1326 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
/* Read min_wqe_inline_mode from the NIC vport context (Ethernet path). */
1328 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1332 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1335 out = mlx5_vzalloc(outlen);
1339 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1343 *min_header = MLX5_GET(query_nic_vport_context_out, out,
1344 nic_vport_context.min_wqe_inline_mode);
/* Set min_wqe_inline_mode on another vport (other_vport == 1). */
1351 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1352 u8 vport, int min_header)
1355 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1358 in = mlx5_vzalloc(inlen);
1362 MLX5_SET(modify_nic_vport_context_in, in,
1363 field_select.min_wqe_inline_mode, 1);
1364 MLX5_SET(modify_nic_vport_context_in, in,
1365 nic_vport_context.min_wqe_inline_mode, min_header);
1366 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1367 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1369 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1374 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
/* Dispatch the min-WQE-header query by port type (IB vs Ethernet). */
1376 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1378 switch (MLX5_CAP_GEN(dev, port_type)) {
1379 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1380 return mlx5_query_hca_min_wqe_header(dev, min_header);
1382 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1383 return mlx5_query_vport_min_wqe_header(dev, min_header);
1389 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
/* Report the three promisc bits (UC/MC/all) of `vport`. */
1391 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1398 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1401 out = kzalloc(outlen, GFP_KERNEL);
1405 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1409 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1410 nic_vport_context.promisc_uc);
1411 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1412 nic_vport_context.promisc_mc);
1413 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1414 nic_vport_context.promisc_all);
1420 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
/* Write all three promisc bits of our NIC vport context in one command. */
1422 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1428 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1431 in = mlx5_vzalloc(inlen);
1433 mlx5_core_err(mdev, "failed to allocate inbox\n");
1437 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1438 MLX5_SET(modify_nic_vport_context_in, in,
1439 nic_vport_context.promisc_uc, promisc_uc);
1440 MLX5_SET(modify_nic_vport_context_in, in,
1441 nic_vport_context.promisc_mc, promisc_mc);
1442 MLX5_SET(modify_nic_vport_context_in, in,
1443 nic_vport_context.promisc_all, promisc_all);
1445 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1449 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1451 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1452 u8 port_num, u16 vport_num,
1453 void *out, int out_size)
1455 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1456 int is_group_manager;
1460 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1462 in = mlx5_vzalloc(in_sz);
1466 MLX5_SET(query_vport_counter_in, in, opcode,
1467 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1469 if (is_group_manager) {
1470 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1471 MLX5_SET(query_vport_counter_in, in, vport_number,
1478 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1479 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1481 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
1484 err = mlx5_cmd_status_to_err_v2(out);
1492 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1494 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1495 struct mlx5_vport_counters *vc)
1497 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1501 out = mlx5_vzalloc(out_sz);
1505 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1509 vc->received_errors.packets =
1510 MLX5_GET64(query_vport_counter_out,
1511 out, received_errors.packets);
1512 vc->received_errors.octets =
1513 MLX5_GET64(query_vport_counter_out,
1514 out, received_errors.octets);
1515 vc->transmit_errors.packets =
1516 MLX5_GET64(query_vport_counter_out,
1517 out, transmit_errors.packets);
1518 vc->transmit_errors.octets =
1519 MLX5_GET64(query_vport_counter_out,
1520 out, transmit_errors.octets);
1521 vc->received_ib_unicast.packets =
1522 MLX5_GET64(query_vport_counter_out,
1523 out, received_ib_unicast.packets);
1524 vc->received_ib_unicast.octets =
1525 MLX5_GET64(query_vport_counter_out,
1526 out, received_ib_unicast.octets);
1527 vc->transmitted_ib_unicast.packets =
1528 MLX5_GET64(query_vport_counter_out,
1529 out, transmitted_ib_unicast.packets);
1530 vc->transmitted_ib_unicast.octets =
1531 MLX5_GET64(query_vport_counter_out,
1532 out, transmitted_ib_unicast.octets);
1533 vc->received_ib_multicast.packets =
1534 MLX5_GET64(query_vport_counter_out,
1535 out, received_ib_multicast.packets);
1536 vc->received_ib_multicast.octets =
1537 MLX5_GET64(query_vport_counter_out,
1538 out, received_ib_multicast.octets);
1539 vc->transmitted_ib_multicast.packets =
1540 MLX5_GET64(query_vport_counter_out,
1541 out, transmitted_ib_multicast.packets);
1542 vc->transmitted_ib_multicast.octets =
1543 MLX5_GET64(query_vport_counter_out,
1544 out, transmitted_ib_multicast.octets);
1545 vc->received_eth_broadcast.packets =
1546 MLX5_GET64(query_vport_counter_out,
1547 out, received_eth_broadcast.packets);
1548 vc->received_eth_broadcast.octets =
1549 MLX5_GET64(query_vport_counter_out,
1550 out, received_eth_broadcast.octets);
1551 vc->transmitted_eth_broadcast.packets =
1552 MLX5_GET64(query_vport_counter_out,
1553 out, transmitted_eth_broadcast.packets);
1554 vc->transmitted_eth_broadcast.octets =
1555 MLX5_GET64(query_vport_counter_out,
1556 out, transmitted_eth_broadcast.octets);
1557 vc->received_eth_unicast.octets =
1558 MLX5_GET64(query_vport_counter_out,
1559 out, received_eth_unicast.octets);
1560 vc->received_eth_unicast.packets =
1561 MLX5_GET64(query_vport_counter_out,
1562 out, received_eth_unicast.packets);
1563 vc->transmitted_eth_unicast.octets =
1564 MLX5_GET64(query_vport_counter_out,
1565 out, transmitted_eth_unicast.octets);
1566 vc->transmitted_eth_unicast.packets =
1567 MLX5_GET64(query_vport_counter_out,
1568 out, transmitted_eth_unicast.packets);
1569 vc->received_eth_multicast.octets =
1570 MLX5_GET64(query_vport_counter_out,
1571 out, received_eth_multicast.octets);
1572 vc->received_eth_multicast.packets =
1573 MLX5_GET64(query_vport_counter_out,
1574 out, received_eth_multicast.packets);
1575 vc->transmitted_eth_multicast.octets =
1576 MLX5_GET64(query_vport_counter_out,
1577 out, transmitted_eth_multicast.octets);
1578 vc->transmitted_eth_multicast.packets =
1579 MLX5_GET64(query_vport_counter_out,
1580 out, transmitted_eth_multicast.packets);
1587 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1588 u64 *sys_image_guid)
1590 switch (MLX5_CAP_GEN(dev, port_type)) {
1591 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1592 return mlx5_query_hca_vport_system_image_guid(dev,
1595 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1596 return mlx5_query_nic_vport_system_image_guid(dev,
1603 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1605 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1607 switch (MLX5_CAP_GEN(dev, port_type)) {
1608 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1609 return mlx5_query_hca_vport_node_guid(dev, node_guid);
1611 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1612 return mlx5_query_nic_vport_node_guid(dev, node_guid);
1618 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1620 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1622 switch (MLX5_CAP_GEN(dev, port_type)) {
1623 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1624 return mlx5_query_hca_vport_port_guid(dev, port_guid);
1626 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1627 return mlx5_query_nic_vport_port_guid(dev, port_guid);
1633 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1635 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1638 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1641 out = mlx5_vzalloc(outlen);
1645 err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1649 *vport_state = MLX5_GET(query_hca_vport_context_out, out,
1650 hca_vport_context.vport_state);
1656 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);