2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
/*
 * Issue QUERY_VPORT_STATE for the given vport; opmod selects the query
 * sub-operation and the raw command output is returned via out/outlen.
 * NOTE(review): lines appear elided in this extract (braces, err decl,
 * the `if (vport)` / `if (err)` guards) — comments cover visible code only.
 */
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 u16 vport, u32 *out, int outlen)
40 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
42 MLX5_SET(query_vport_state_in, in, opcode,
43 MLX5_CMD_OP_QUERY_VPORT_STATE);
44 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
45 MLX5_SET(query_vport_state_in, in, vport_number, vport);
/* other_vport=1: target a vport other than the caller's own function */
47 MLX5_SET(query_vport_state_in, in, other_vport, 1);
49 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
/* warn on firmware command failure (presumably under `if (err)`) */
51 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
/* Return the operational `state` field from QUERY_VPORT_STATE. */
56 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
58 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
/* ignore the return code; out stays zeroed on failure */
60 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
62 return MLX5_GET(query_vport_state_out, out, state);
64 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
/* Return the `admin_state` field from QUERY_VPORT_STATE. */
66 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
68 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
/* ignore the return code; out stays zeroed on failure */
70 _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
72 return MLX5_GET(query_vport_state_out, out, admin_state);
74 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
/*
 * Set the administrative state of a vport via MODIFY_VPORT_STATE.
 * NOTE(review): extract elides the `vport`/`state` parameters and err
 * handling lines; comments describe visible statements only.
 */
76 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
79 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
80 u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
83 MLX5_SET(modify_vport_state_in, in, opcode,
84 MLX5_CMD_OP_MODIFY_VPORT_STATE);
85 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
86 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
/* other_vport=1: operate on a vport other than the caller's own */
89 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
91 MLX5_SET(modify_vport_state_in, in, admin_state, state);
93 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* warn on firmware command failure (presumably under `if (err)`) */
95 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
99 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
/*
 * Common helper: execute QUERY_NIC_VPORT_CONTEXT for `vport` and copy the
 * raw response into out/outlen. Used by all the query accessors below.
 */
101 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
102 u32 *out, int outlen)
104 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
106 MLX5_SET(query_nic_vport_context_in, in, opcode,
107 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
109 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
/* other_vport=1: query a vport other than the caller's own */
111 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
113 return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
/*
 * Per-client cap on queue-counter allocations: the netdev (ETH) client gets
 * a fixed reserved set, IB gets the remainder of max_qp_cnt.
 */
116 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
120 case MLX5_INTERFACE_PROTOCOL_IB:
121 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
122 MLX5_QCOUNTER_SETS_NETDEV);
123 case MLX5_INTERFACE_PROTOCOL_ETH:
124 return MLX5_QCOUNTER_SETS_NETDEV;
/* unknown client id — warn (default case; elided in this extract) */
126 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
/*
 * Allocate a firmware queue counter for `client_id`, returning its set id
 * in *counter_set_id and bumping the per-client allocation count.
 */
131 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
132 int client_id, u16 *counter_set_id)
134 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
135 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
/* refuse allocation beyond the per-client cap */
138 if (mdev->num_q_counter_allocated[client_id] >
139 mlx5_vport_max_q_counter_allocator(mdev, client_id))
142 MLX5_SET(alloc_q_counter_in, in, opcode,
143 MLX5_CMD_OP_ALLOC_Q_COUNTER);
145 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* on success: report the new set id and account for it */
148 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
151 mdev->num_q_counter_allocated[client_id]++;
/*
 * Free a previously allocated queue counter and decrement the per-client
 * allocation count; rejects underflow of that count.
 */
156 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
157 int client_id, u16 counter_set_id)
159 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
160 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
/* nothing allocated for this client — bail out (error path elided) */
163 if (mdev->num_q_counter_allocated[client_id] <= 0)
166 MLX5_SET(dealloc_q_counter_in, in, opcode,
167 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
168 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
171 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
173 mdev->num_q_counter_allocated[client_id]--;
/*
 * Read (and optionally clear, via `reset`) the statistics of a queue
 * counter set; raw command output is written to out/out_size.
 */
178 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
184 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
186 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
187 MLX5_SET(query_q_counter_in, in, clear, reset);
188 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
190 return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
/*
 * Convenience wrapper: query a counter set (without clearing it) and pull
 * out the out-of-RX-buffer drop counter.
 */
193 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
195 u32 *out_of_rx_buffer)
197 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
200 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
206 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
/* Read min_wqe_inline_mode from a vport's NIC vport context. */
211 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
212 u16 vport, u8 *min_inline)
214 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
217 err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
/* presumably guarded by `if (!err)` in the full source */
219 *min_inline = MLX5_GET(query_nic_vport_context_out, out,
220 nic_vport_context.min_wqe_inline_mode);
223 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
/*
 * Resolve the effective minimum WQE inline mode from the ETH capability:
 * fixed L2, per-vport-context (query vport 0), or none required.
 */
225 int mlx5_query_min_inline(struct mlx5_core_dev *mdev,
230 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
231 case MLX5_CAP_INLINE_MODE_L2:
232 *min_inline_mode = MLX5_INLINE_MODE_L2;
235 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
/* mode is per-vport: ask firmware for our own (vport 0) setting */
236 err = mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
238 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
239 *min_inline_mode = MLX5_INLINE_MODE_NONE;
248 EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
/*
 * Set min_wqe_inline_mode in another vport's NIC vport context
 * (other_vport is hard-wired to 1 here).
 */
250 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
251 u16 vport, u8 min_inline)
253 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
254 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
/* select only the min_wqe_inline_mode field for modification */
257 MLX5_SET(modify_nic_vport_context_in, in,
258 field_select.min_wqe_inline_mode, 1);
259 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
260 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
262 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
263 in, nic_vport_context);
264 MLX5_SET(nic_vport_context, nic_vport_ctx,
265 min_wqe_inline_mode, min_inline);
267 return mlx5_modify_nic_vport_context(mdev, in, inlen);
269 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);
/*
 * Query a vport's permanent MAC. The permanent_address field is 8 bytes
 * with the 6-byte MAC in the low bytes, hence the +2 offset on copy.
 */
271 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
275 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
/* NOTE(review): allocation-failure check elided in this extract */
279 out = mlx5_vzalloc(outlen);
283 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
284 nic_vport_context.permanent_address);
286 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
/* skip the 2-byte pad preceding the 48-bit MAC */
290 ether_addr_copy(addr, &out_addr[2]);
296 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
/*
 * Program a vport's permanent MAC via MODIFY_NIC_VPORT_CONTEXT.
 * Mirrors the +2 layout offset used by the query side.
 */
298 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
302 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
307 in = mlx5_vzalloc(inlen);
309 mlx5_core_warn(mdev, "failed to allocate inbox\n");
/* select only the permanent_address field for modification */
313 MLX5_SET(modify_nic_vport_context_in, in,
314 field_select.permanent_address, 1);
315 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
/* other_vport=1: modify a vport other than the caller's own */
318 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
320 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
321 in, nic_vport_context);
322 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
/* MAC occupies the low 6 bytes of the 8-byte field */
325 ether_addr_copy(&perm_mac[2], addr);
327 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
333 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
/* Read system_image_guid from our own (vport 0) NIC vport context. */
335 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
336 u64 *system_image_guid)
339 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
342 out = mlx5_vzalloc(outlen);
346 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
350 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
351 nic_vport_context.system_image_guid);
356 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
/* Read node_guid from our own (vport 0) NIC vport context. */
358 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
361 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
364 out = mlx5_vzalloc(outlen);
368 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
372 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
373 nic_vport_context.node_guid);
379 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
/* Read port_guid from our own (vport 0) NIC vport context. */
381 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
385 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
388 out = mlx5_vzalloc(outlen);
392 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
396 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
397 nic_vport_context.port_guid);
/* Read the qkey violation counter from our own NIC vport context. */
404 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
408 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
411 out = mlx5_vzalloc(outlen);
415 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
419 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
420 nic_vport_context.qkey_violation_counter);
426 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
/*
 * Common helper: stamp the MODIFY_NIC_VPORT_CONTEXT opcode into a
 * caller-built mailbox and execute it.
 */
428 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
431 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
433 MLX5_SET(modify_nic_vport_context_in, in, opcode,
434 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
436 return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
/*
 * Toggle the roce_en bit in our NIC vport context; backs the public
 * enable/disable RoCE wrappers below.
 */
439 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
443 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
446 in = mlx5_vzalloc(inlen);
448 mlx5_core_warn(mdev, "failed to allocate inbox\n");
452 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
453 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
456 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
/*
 * Install a single-entry current-UC-MAC allowed list on a vport.
 * The mailbox is sized for one extra mac_address_layout entry after the
 * base modify_nic_vport_context_in struct.
 */
463 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
464 bool other_vport, u8 *addr)
467 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
468 + MLX5_ST_SZ_BYTES(mac_address_layout);
473 in = mlx5_vzalloc(inlen);
475 mlx5_core_warn(mdev, "failed to allocate inbox\n");
479 MLX5_SET(modify_nic_vport_context_in, in,
480 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
481 MLX5_SET(modify_nic_vport_context_in, in,
482 vport_number, vport);
483 MLX5_SET(modify_nic_vport_context_in, in,
484 other_vport, other_vport);
/* addresses_list field-select covers the allowed-list update below */
485 MLX5_SET(modify_nic_vport_context_in, in,
486 field_select.addresses_list, 1);
487 MLX5_SET(modify_nic_vport_context_in, in,
488 nic_vport_context.allowed_list_type,
489 MLX5_NIC_VPORT_LIST_TYPE_UC);
/* exactly one MAC in the list */
490 MLX5_SET(modify_nic_vport_context_in, in,
491 nic_vport_context.allowed_list_size, 1);
493 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
494 nic_vport_context.current_uc_mac_address);
495 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
497 ether_addr_copy(mac_ptr, addr);
499 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
505 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
/*
 * Set another vport's node GUID. Requires the caller to be the vport
 * group manager and the nic_vport_node_guid_modify eswitch capability.
 */
507 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
508 u32 vport, u64 node_guid)
511 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
513 void *nic_vport_context;
/* capability gates — error-return lines elided in this extract */
517 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
519 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
522 in = mlx5_vzalloc(inlen);
524 mlx5_core_warn(mdev, "failed to allocate inbox\n");
528 MLX5_SET(modify_nic_vport_context_in, in,
529 field_select.node_guid, 1);
530 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
532 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
534 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
535 in, nic_vport_context);
536 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
538 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
544 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
/*
 * Set another vport's port GUID; parallel to the node-GUID setter above
 * but gated on nic_vport_port_guid_modify.
 */
546 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
547 u32 vport, u64 port_guid)
550 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
552 void *nic_vport_context;
/* capability gates — error-return lines elided in this extract */
556 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
558 if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
561 in = mlx5_vzalloc(inlen);
563 mlx5_core_warn(mdev, "failed to allocate inbox\n");
567 MLX5_SET(modify_nic_vport_context_in, in,
568 field_select.port_guid, 1);
569 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
571 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
573 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
574 in, nic_vport_context);
575 MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
577 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
583 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
/*
 * Program a vport's allowed VLAN list. The list entries are written into
 * the current_uc_mac_address[] array — that array doubles as generic
 * allowed-list storage, interpreted per allowed_list_type.
 */
585 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
586 u16 *vlan_list, int list_len)
590 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
591 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
593 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
/* reject lists larger than the device supports */
595 if (list_len > max_list_size) {
596 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
597 list_len, max_list_size);
601 in = mlx5_vzalloc(inlen);
603 mlx5_core_warn(dev, "failed to allocate inbox\n");
607 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
609 MLX5_SET(modify_nic_vport_context_in, in,
611 MLX5_SET(modify_nic_vport_context_in, in,
612 field_select.addresses_list, 1);
614 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
616 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
617 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
618 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
620 for (i = 0; i < list_len; i++) {
621 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
622 current_uc_mac_address[i]);
623 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
626 err = mlx5_modify_nic_vport_context(dev, in, inlen);
631 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
/*
 * Program a vport's allowed multicast MAC list. addr_list entries are
 * 8-byte values with the 6-byte MAC in the low bytes (+2 field offset).
 */
633 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
634 u64 *addr_list, size_t addr_list_len)
637 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
638 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
641 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
/* reject lists larger than the device supports */
643 if ((int)addr_list_len > max_list_sz) {
644 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
645 (int)addr_list_len, max_list_sz);
649 in = mlx5_vzalloc(inlen);
651 mlx5_core_warn(mdev, "failed to allocate inbox\n");
655 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
657 MLX5_SET(modify_nic_vport_context_in, in,
659 MLX5_SET(modify_nic_vport_context_in, in,
660 field_select.addresses_list, 1);
662 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
664 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
665 MLX5_NIC_VPORT_LIST_TYPE_MC);
666 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
/* current_uc_mac_address[] doubles as generic allowed-list storage */
668 for (i = 0; i < addr_list_len; i++) {
669 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
670 current_uc_mac_address[i]);
671 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
673 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
676 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
682 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
/*
 * Configure a vport's promiscuous flags (UC/MC/all). The promisc_* bits
 * are only set when the corresponding flag is true (guards elided here).
 */
684 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
685 bool promisc_mc, bool promisc_uc,
688 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
/* ctx points into `in`; taking the address before the memset is fine */
689 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
692 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
694 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
696 MLX5_SET(modify_nic_vport_context_in, in,
698 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
700 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
702 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
704 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
706 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
708 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
/*
 * Query a vport's allowed UC/MC MAC list. *list_size is in/out: the
 * caller's capacity on entry (clamped to the device max), the actual
 * firmware list length on return.
 */
710 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
712 enum mlx5_list_type list_type,
713 u8 addr_list[][ETH_ALEN],
716 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
725 req_list_size = *list_size;
727 max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
728 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
729 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
731 if (req_list_size > max_list_size) {
732 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
733 req_list_size, max_list_size);
734 req_list_size = max_list_size;
/*
 * NOTE(review): out_sz is based on modify_nic_vport_context_in rather
 * than query_nic_vport_context_out — looks copy-pasted; verify the two
 * layouts have the same base size before touching this.
 */
737 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
738 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
740 out = kzalloc(out_sz, GFP_KERNEL);
744 MLX5_SET(query_nic_vport_context_in, in, opcode,
745 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
746 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
747 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
750 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
752 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
756 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
758 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
/* +2 skips the pad bytes before each 48-bit MAC */
761 *list_size = req_list_size;
762 for (i = 0; i < req_list_size; i++) {
763 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
765 current_uc_mac_address[i]) + 2;
766 ether_addr_copy(addr_list[i], mac_addr);
772 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
/*
 * Replace a vport's allowed UC/MC MAC list with the given addresses.
 * Mailbox is sized for list_size mac_address_layout entries.
 */
774 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
775 enum mlx5_list_type list_type,
776 u8 addr_list[][ETH_ALEN],
779 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
787 max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
788 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
789 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
/* reject lists larger than the device supports (return elided) */
791 if (list_size > max_list_size)
794 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
795 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
797 in = kzalloc(in_sz, GFP_KERNEL);
801 MLX5_SET(modify_nic_vport_context_in, in, opcode,
802 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
803 MLX5_SET(modify_nic_vport_context_in, in,
804 field_select.addresses_list, 1);
806 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
809 MLX5_SET(nic_vport_context, nic_vport_ctx,
810 allowed_list_type, list_type);
811 MLX5_SET(nic_vport_context, nic_vport_ctx,
812 allowed_list_size, list_size);
/* +2 skips the pad bytes before each 48-bit MAC */
814 for (i = 0; i < list_size; i++) {
815 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
817 current_uc_mac_address[i]) + 2;
818 ether_addr_copy(curr_mac, addr_list[i]);
821 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
825 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
/*
 * Query a vport's allowed VLAN list. *size is in/out: caller capacity on
 * entry (clamped to the device max), actual list length on return.
 */
827 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
832 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
841 req_list_size = *size;
842 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
843 if (req_list_size > max_list_size) {
844 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
845 req_list_size, max_list_size);
846 req_list_size = max_list_size;
/*
 * NOTE(review): out_sz based on modify_nic_vport_context_in, not
 * query_nic_vport_context_out — same pattern as the MAC-list query;
 * verify the base sizes match before changing.
 */
849 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
850 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
852 out = kzalloc(out_sz, GFP_KERNEL);
856 MLX5_SET(query_nic_vport_context_in, in, opcode,
857 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
858 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
859 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
860 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
863 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
865 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
869 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
871 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
/* VLAN entries live in the shared current_uc_mac_address[] array */
874 *size = req_list_size;
875 for (i = 0; i < req_list_size; i++) {
876 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
878 current_uc_mac_address[i]);
879 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
885 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
/*
 * Replace a vport's allowed VLAN list with the given VLAN ids.
 */
887 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
891 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
899 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
/* reject lists larger than the device supports (return elided) */
901 if (list_size > max_list_size)
904 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
905 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
907 in = kzalloc(in_sz, GFP_KERNEL);
911 MLX5_SET(modify_nic_vport_context_in, in, opcode,
912 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
913 MLX5_SET(modify_nic_vport_context_in, in,
914 field_select.addresses_list, 1);
916 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
919 MLX5_SET(nic_vport_context, nic_vport_ctx,
920 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
921 MLX5_SET(nic_vport_context, nic_vport_ctx,
922 allowed_list_size, list_size);
/* VLAN entries are written into the shared allowed-list array */
924 for (i = 0; i < list_size; i++) {
925 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
927 current_uc_mac_address[i]);
928 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
931 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
935 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
/* Read the roce_en bit from our own (vport 0) NIC vport context. */
937 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
940 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
943 out = kzalloc(outlen, GFP_KERNEL);
947 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
951 *enable = MLX5_GET(query_nic_vport_context_out, out,
952 nic_vport_context.roce_en);
958 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
/*
 * Program another vport's permanent MAC; addresses the mac_addr_47_32
 * sub-field directly so no manual +2 offset is needed here.
 */
960 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
964 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
968 in = mlx5_vzalloc(inlen);
970 mlx5_core_warn(mdev, "failed to allocate inbox\n");
974 MLX5_SET(modify_nic_vport_context_in, in,
975 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
976 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
977 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
978 MLX5_SET(modify_nic_vport_context_in, in,
979 field_select.permanent_address, 1);
980 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
981 nic_vport_context.permanent_address.mac_addr_47_32);
982 ether_addr_copy(mac_ptr, addr);
984 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
990 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Enable RoCE on our NIC vport (thin wrapper, state = 1). */
992 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
994 return mlx5_nic_vport_enable_disable_roce(mdev, 1);
996 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
/* Disable RoCE on our NIC vport (thin wrapper, state = 0). */
998 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
1000 return mlx5_nic_vport_enable_disable_roce(mdev, 0);
1002 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
/*
 * Execute QUERY_VPORT_COUNTER. A group manager may target another
 * function: vport_number = vf + 1 (VF numbering is offset by the PF).
 * port_num is only meaningful on dual-port devices.
 */
1004 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
1005 int vf, u8 port_num, void *out,
1008 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1009 int is_group_manager;
1013 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1014 in = mlx5_vzalloc(in_sz);
1020 MLX5_SET(query_vport_counter_in, in, opcode,
1021 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1023 if (is_group_manager) {
1024 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
/* VF index to vport number: VFs start at vport 1 */
1025 MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
1031 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1032 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1034 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1039 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
/*
 * Execute QUERY_HCA_VPORT_CONTEXT; other_vport/vport_number are only set
 * when the caller is the vport group manager (guard lines elided).
 */
1041 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
1042 u8 port_num, u8 vport_num, u32 *out,
1045 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
1046 int is_group_manager;
1048 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
1050 MLX5_SET(query_hca_vport_context_in, in, opcode,
1051 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
1054 if (is_group_manager) {
1055 MLX5_SET(query_hca_vport_context_in, in, other_vport,
1057 MLX5_SET(query_hca_vport_context_in, in, vport_number,
/* port_num only meaningful on dual-port devices */
1064 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
1065 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
1067 return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
/* Read system_image_guid from our own HCA vport context (port 1). */
1070 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1071 u64 *system_image_guid)
1074 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1077 out = mlx5_vzalloc(outlen);
1081 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1085 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1086 hca_vport_context.system_image_guid);
1092 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
/* Read node_guid from our own HCA vport context (port 1). */
1094 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1097 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1100 out = mlx5_vzalloc(outlen);
1104 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1108 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1109 hca_vport_context.node_guid);
1115 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
/* Read port_guid from our own HCA vport context (port 1). */
1117 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1121 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1124 out = mlx5_vzalloc(outlen);
1128 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1132 *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1133 hca_vport_context.port_guid);
/*
 * Query a single GID (or, for gid_index == 0xffff, the whole table) from
 * an HCA vport. Only the first returned entry is copied into *gid.
 */
1140 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1141 u16 vport_num, u16 gid_index, union ib_gid *gid)
1143 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1144 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1145 int is_group_manager;
1153 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1154 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
/* 0xffff is the "all entries" wildcard; otherwise bound-check the index */
1156 if (gid_index > tbsz && gid_index != 0xffff)
1159 if (gid_index == 0xffff)
/* grow the out mailbox for nout GID entries */
1164 out_sz += nout * sizeof(*gid);
1166 in = mlx5_vzalloc(in_sz);
1167 out = mlx5_vzalloc(out_sz);
1173 MLX5_SET(query_hca_vport_gid_in, in, opcode,
1174 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1176 if (is_group_manager) {
1177 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1179 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1186 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1188 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1189 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1191 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
/* copy the first GID entry out of the response */
1195 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1196 gid->global.subnet_prefix = tmp->global.subnet_prefix;
1197 gid->global.interface_id = tmp->global.interface_id;
1204 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
/*
 * Query one pkey (or the whole table for pkey_index == 0xffff) from an
 * HCA vport; results are copied into the caller's pkey array.
 */
1206 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1207 u8 port_num, u16 vf_num, u16 pkey_index,
1210 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1211 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1212 int is_group_manager;
1221 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1223 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
/* 0xffff is the "all entries" wildcard; otherwise bound-check the index */
1224 if (pkey_index > tbsz && pkey_index != 0xffff)
1227 if (pkey_index == 0xffff)
/* grow the out mailbox for nout pkey entries */
1232 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
/* NOTE(review): kzalloc results appear unchecked in this extract */
1234 in = kzalloc(in_sz, GFP_KERNEL);
1235 out = kzalloc(out_sz, GFP_KERNEL);
1237 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1238 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1240 if (is_group_manager) {
1241 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1243 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1249 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1251 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1252 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1254 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
/* walk the response array, one pkey struct per entry */
1258 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1259 for (i = 0; i < nout; i++, pkey++,
1260 pkarr += MLX5_ST_SZ_BYTES(pkey))
1261 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1268 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
/* IB path: read min_wqe_inline_mode from the HCA vport context. */
1270 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1274 u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1277 out = mlx5_vzalloc(outlen);
1281 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1285 *min_header = MLX5_GET(query_hca_vport_context_out, out,
1286 hca_vport_context.min_wqe_inline_mode);
/*
 * Common helper: stamp vport number, other_vport and the
 * MODIFY_ESW_VPORT_CONTEXT opcode into a caller-built mailbox, then
 * execute it, warning on failure.
 */
1293 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1294 u16 vport, void *in, int inlen)
1296 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
1299 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
/* other_vport=1 presumably under an `if (vport)` guard (elided) */
1301 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1303 MLX5_SET(modify_esw_vport_context_in, in, opcode,
1304 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1306 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
1308 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
/*
 * Configure eswitch CVLAN insert/strip for a vport. VLAN id/cfi/pcp are
 * only programmed when insertion is actually enabled; the field_select
 * commits both the strip and insert settings in one command.
 */
1313 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1314 u8 insert_mode, u8 strip_mode,
1315 u16 vlan, u8 cfi, u8 pcp)
1317 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1319 memset(in, 0, sizeof(in));
/* tag fields matter only when insertion is enabled */
1321 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1322 MLX5_SET(modify_esw_vport_context_in, in,
1323 esw_vport_context.cvlan_cfi, cfi);
1324 MLX5_SET(modify_esw_vport_context_in, in,
1325 esw_vport_context.cvlan_pcp, pcp);
1326 MLX5_SET(modify_esw_vport_context_in, in,
1327 esw_vport_context.cvlan_id, vlan);
1330 MLX5_SET(modify_esw_vport_context_in, in,
1331 esw_vport_context.vport_cvlan_insert, insert_mode);
1333 MLX5_SET(modify_esw_vport_context_in, in,
1334 esw_vport_context.vport_cvlan_strip, strip_mode);
/* commit both strip and insert fields */
1336 MLX5_SET(modify_esw_vport_context_in, in, field_select,
1337 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1338 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1340 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1342 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
/* Read the MTU from our own (vport 0) NIC vport context. */
1344 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1347 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1350 out = mlx5_vzalloc(outlen);
1354 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1358 *mtu = MLX5_GET(query_nic_vport_context_out, out,
1359 nic_vport_context.mtu);
1365 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
/* Set the MTU in our own NIC vport context. */
1367 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1370 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1373 in = mlx5_vzalloc(inlen);
1377 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1378 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1380 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1385 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
/* ETH path: read min_wqe_inline_mode from the NIC vport context. */
1387 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1391 u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1394 out = mlx5_vzalloc(outlen);
1398 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1402 *min_header = MLX5_GET(query_nic_vport_context_out, out,
1403 nic_vport_context.min_wqe_inline_mode);
1410 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1411 u8 vport, int min_header)
1414 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1417 in = mlx5_vzalloc(inlen);
1421 MLX5_SET(modify_nic_vport_context_in, in,
1422 field_select.min_wqe_inline_mode, 1);
1423 MLX5_SET(modify_nic_vport_context_in, in,
1424 nic_vport_context.min_wqe_inline_mode, min_header);
1425 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1426 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1428 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1433 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1435 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1437 switch (MLX5_CAP_GEN(dev, port_type)) {
1438 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1439 return mlx5_query_hca_min_wqe_header(dev, min_header);
1441 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1442 return mlx5_query_vport_min_wqe_header(dev, min_header);
1448 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1450 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1457 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1460 out = kzalloc(outlen, GFP_KERNEL);
1464 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1468 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1469 nic_vport_context.promisc_uc);
1470 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1471 nic_vport_context.promisc_mc);
1472 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1473 nic_vport_context.promisc_all);
1479 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1481 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1487 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1490 in = mlx5_vzalloc(inlen);
1492 mlx5_core_err(mdev, "failed to allocate inbox\n");
1496 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1497 MLX5_SET(modify_nic_vport_context_in, in,
1498 nic_vport_context.promisc_uc, promisc_uc);
1499 MLX5_SET(modify_nic_vport_context_in, in,
1500 nic_vport_context.promisc_mc, promisc_mc);
1501 MLX5_SET(modify_nic_vport_context_in, in,
1502 nic_vport_context.promisc_all, promisc_all);
1504 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1508 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1510 int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
1511 enum mlx5_local_lb_selection selection,
1515 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1518 in = mlx5_vzalloc(inlen);
1520 mlx5_core_warn(mdev, "failed to allocate inbox\n");
1524 MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);
1526 if (selection == MLX5_LOCAL_MC_LB) {
1527 MLX5_SET(modify_nic_vport_context_in, in,
1528 field_select.disable_mc_local_lb, 1);
1529 MLX5_SET(modify_nic_vport_context_in, in,
1530 nic_vport_context.disable_mc_local_lb,
1533 MLX5_SET(modify_nic_vport_context_in, in,
1534 field_select.disable_uc_local_lb, 1);
1535 MLX5_SET(modify_nic_vport_context_in, in,
1536 nic_vport_context.disable_uc_local_lb,
1540 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1545 EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);
1547 int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
1548 enum mlx5_local_lb_selection selection,
1552 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1555 out = kzalloc(outlen, GFP_KERNEL);
1559 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1563 if (selection == MLX5_LOCAL_MC_LB)
1564 *value = MLX5_GET(query_nic_vport_context_out, out,
1565 nic_vport_context.disable_mc_local_lb);
1567 *value = MLX5_GET(query_nic_vport_context_out, out,
1568 nic_vport_context.disable_uc_local_lb);
1574 EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
1576 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1577 u8 port_num, u16 vport_num,
1578 void *out, int out_size)
1580 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1581 int is_group_manager;
1585 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1587 in = mlx5_vzalloc(in_sz);
1591 MLX5_SET(query_vport_counter_in, in, opcode,
1592 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1594 if (is_group_manager) {
1595 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1596 MLX5_SET(query_vport_counter_in, in, vport_number,
1603 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1604 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1606 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
1612 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1614 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1615 struct mlx5_vport_counters *vc)
1617 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1621 out = mlx5_vzalloc(out_sz);
1625 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1629 vc->received_errors.packets =
1630 MLX5_GET64(query_vport_counter_out,
1631 out, received_errors.packets);
1632 vc->received_errors.octets =
1633 MLX5_GET64(query_vport_counter_out,
1634 out, received_errors.octets);
1635 vc->transmit_errors.packets =
1636 MLX5_GET64(query_vport_counter_out,
1637 out, transmit_errors.packets);
1638 vc->transmit_errors.octets =
1639 MLX5_GET64(query_vport_counter_out,
1640 out, transmit_errors.octets);
1641 vc->received_ib_unicast.packets =
1642 MLX5_GET64(query_vport_counter_out,
1643 out, received_ib_unicast.packets);
1644 vc->received_ib_unicast.octets =
1645 MLX5_GET64(query_vport_counter_out,
1646 out, received_ib_unicast.octets);
1647 vc->transmitted_ib_unicast.packets =
1648 MLX5_GET64(query_vport_counter_out,
1649 out, transmitted_ib_unicast.packets);
1650 vc->transmitted_ib_unicast.octets =
1651 MLX5_GET64(query_vport_counter_out,
1652 out, transmitted_ib_unicast.octets);
1653 vc->received_ib_multicast.packets =
1654 MLX5_GET64(query_vport_counter_out,
1655 out, received_ib_multicast.packets);
1656 vc->received_ib_multicast.octets =
1657 MLX5_GET64(query_vport_counter_out,
1658 out, received_ib_multicast.octets);
1659 vc->transmitted_ib_multicast.packets =
1660 MLX5_GET64(query_vport_counter_out,
1661 out, transmitted_ib_multicast.packets);
1662 vc->transmitted_ib_multicast.octets =
1663 MLX5_GET64(query_vport_counter_out,
1664 out, transmitted_ib_multicast.octets);
1665 vc->received_eth_broadcast.packets =
1666 MLX5_GET64(query_vport_counter_out,
1667 out, received_eth_broadcast.packets);
1668 vc->received_eth_broadcast.octets =
1669 MLX5_GET64(query_vport_counter_out,
1670 out, received_eth_broadcast.octets);
1671 vc->transmitted_eth_broadcast.packets =
1672 MLX5_GET64(query_vport_counter_out,
1673 out, transmitted_eth_broadcast.packets);
1674 vc->transmitted_eth_broadcast.octets =
1675 MLX5_GET64(query_vport_counter_out,
1676 out, transmitted_eth_broadcast.octets);
1677 vc->received_eth_unicast.octets =
1678 MLX5_GET64(query_vport_counter_out,
1679 out, received_eth_unicast.octets);
1680 vc->received_eth_unicast.packets =
1681 MLX5_GET64(query_vport_counter_out,
1682 out, received_eth_unicast.packets);
1683 vc->transmitted_eth_unicast.octets =
1684 MLX5_GET64(query_vport_counter_out,
1685 out, transmitted_eth_unicast.octets);
1686 vc->transmitted_eth_unicast.packets =
1687 MLX5_GET64(query_vport_counter_out,
1688 out, transmitted_eth_unicast.packets);
1689 vc->received_eth_multicast.octets =
1690 MLX5_GET64(query_vport_counter_out,
1691 out, received_eth_multicast.octets);
1692 vc->received_eth_multicast.packets =
1693 MLX5_GET64(query_vport_counter_out,
1694 out, received_eth_multicast.packets);
1695 vc->transmitted_eth_multicast.octets =
1696 MLX5_GET64(query_vport_counter_out,
1697 out, transmitted_eth_multicast.octets);
1698 vc->transmitted_eth_multicast.packets =
1699 MLX5_GET64(query_vport_counter_out,
1700 out, transmitted_eth_multicast.packets);
1707 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1708 u64 *sys_image_guid)
1710 switch (MLX5_CAP_GEN(dev, port_type)) {
1711 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1712 return mlx5_query_hca_vport_system_image_guid(dev,
1715 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1716 return mlx5_query_nic_vport_system_image_guid(dev,
1723 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1725 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1727 switch (MLX5_CAP_GEN(dev, port_type)) {
1728 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1729 return mlx5_query_hca_vport_node_guid(dev, node_guid);
1731 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1732 return mlx5_query_nic_vport_node_guid(dev, node_guid);
1738 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1740 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1742 switch (MLX5_CAP_GEN(dev, port_type)) {
1743 case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1744 return mlx5_query_hca_vport_port_guid(dev, port_guid);
1746 case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1747 return mlx5_query_nic_vport_port_guid(dev, port_guid);
1753 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1755 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1758 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1761 out = mlx5_vzalloc(outlen);
1765 err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1769 *vport_state = MLX5_GET(query_hca_vport_context_out, out,
1770 hca_vport_context.vport_state);
1776 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1778 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
1779 u8 port_num, void *out, size_t sz)
1784 in = mlx5_vzalloc(sz);
1790 MLX5_SET(ppcnt_reg, in, local_port, port_num);
1792 MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
1793 err = mlx5_core_access_reg(dev, in, sz, out,
1794 sz, MLX5_REG_PPCNT, 0, 0);