/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
33 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
35 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
36 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
39 memset(in, 0, sizeof(in));
41 MLX5_SET(query_vport_state_in, in, opcode,
42 MLX5_CMD_OP_QUERY_VPORT_STATE);
43 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
45 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
48 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
50 return MLX5_GET(query_vport_state_out, out, state);
52 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
54 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
57 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
59 memset(in, 0, sizeof(in));
61 MLX5_SET(query_nic_vport_context_in, in, opcode,
62 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
64 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
66 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
68 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
71 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
73 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
74 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_in)];
77 memset(in, 0, sizeof(in));
78 memset(out, 0, sizeof(out));
80 MLX5_SET(alloc_q_counter_in, in, opcode,
81 MLX5_CMD_OP_ALLOC_Q_COUNTER);
83 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
89 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
94 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
97 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
98 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
100 memset(in, 0, sizeof(in));
101 memset(out, 0, sizeof(out));
103 MLX5_SET(dealloc_q_counter_in, in, opcode,
104 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
105 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
108 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
112 static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
118 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
120 memset(in, 0, sizeof(in));
122 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
123 MLX5_SET(query_q_counter_in, in, clear, reset);
124 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
126 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
130 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
132 u32 *out_of_rx_buffer)
134 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
137 memset(out, 0, sizeof(out));
139 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
145 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
150 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
154 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
158 out = mlx5_vzalloc(outlen);
162 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
163 nic_vport_context.permanent_address);
165 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
169 ether_addr_copy(addr, &out_addr[2]);
175 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
177 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
178 u64 *system_image_guid)
181 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
184 out = mlx5_vzalloc(outlen);
188 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
192 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
193 nic_vport_context.system_image_guid);
198 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
200 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
203 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
206 out = mlx5_vzalloc(outlen);
210 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
214 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
215 nic_vport_context.node_guid);
221 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
223 int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
226 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
229 out = mlx5_vzalloc(outlen);
233 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
237 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
238 nic_vport_context.port_guid);
244 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
246 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
250 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
253 out = mlx5_vzalloc(outlen);
257 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
261 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
262 nic_vport_context.qkey_violation_counter);
268 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
270 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
273 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
275 MLX5_SET(modify_nic_vport_context_in, in, opcode,
276 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
278 memset(out, 0, sizeof(out));
279 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
282 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
286 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
289 in = mlx5_vzalloc(inlen);
291 mlx5_core_warn(mdev, "failed to allocate inbox\n");
295 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
296 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
299 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
306 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
307 bool other_vport, u8 *addr)
310 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
311 + MLX5_ST_SZ_BYTES(mac_address_layout);
316 in = mlx5_vzalloc(inlen);
318 mlx5_core_warn(mdev, "failed to allocate inbox\n");
322 MLX5_SET(modify_nic_vport_context_in, in,
323 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
324 MLX5_SET(modify_nic_vport_context_in, in,
325 vport_number, vport);
326 MLX5_SET(modify_nic_vport_context_in, in,
327 other_vport, other_vport);
328 MLX5_SET(modify_nic_vport_context_in, in,
329 field_select.addresses_list, 1);
330 MLX5_SET(modify_nic_vport_context_in, in,
331 nic_vport_context.allowed_list_type,
332 MLX5_NIC_VPORT_LIST_TYPE_UC);
333 MLX5_SET(modify_nic_vport_context_in, in,
334 nic_vport_context.allowed_list_size, 1);
336 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
337 nic_vport_context.current_uc_mac_address);
338 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
340 ether_addr_copy(mac_ptr, addr);
342 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
348 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
350 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
351 u16 *vlan_list, int list_len)
355 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
356 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
358 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
360 if (list_len > max_list_size) {
361 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
362 list_len, max_list_size);
366 in = mlx5_vzalloc(inlen);
368 mlx5_core_warn(dev, "failed to allocate inbox\n");
372 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
374 MLX5_SET(modify_nic_vport_context_in, in,
376 MLX5_SET(modify_nic_vport_context_in, in,
377 field_select.addresses_list, 1);
379 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
381 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
382 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
383 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
385 for (i = 0; i < list_len; i++) {
386 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
387 current_uc_mac_address[i]);
388 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
391 err = mlx5_modify_nic_vport_context(dev, in, inlen);
396 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
398 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
399 u64 *addr_list, size_t addr_list_len)
402 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
403 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
406 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
408 if ((int)addr_list_len > max_list_sz) {
409 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
410 (int)addr_list_len, max_list_sz);
414 in = mlx5_vzalloc(inlen);
416 mlx5_core_warn(mdev, "failed to allocate inbox\n");
420 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
422 MLX5_SET(modify_nic_vport_context_in, in,
424 MLX5_SET(modify_nic_vport_context_in, in,
425 field_select.addresses_list, 1);
427 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
429 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
430 MLX5_NIC_VPORT_LIST_TYPE_MC);
431 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
433 for (i = 0; i < addr_list_len; i++) {
434 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
435 current_uc_mac_address[i]);
436 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
438 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
441 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
447 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
449 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
450 bool promisc_mc, bool promisc_uc,
453 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
454 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
457 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
459 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
461 MLX5_SET(modify_nic_vport_context_in, in,
463 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
465 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
467 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
469 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
471 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
473 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
474 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
478 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
482 in = mlx5_vzalloc(inlen);
484 mlx5_core_warn(mdev, "failed to allocate inbox\n");
488 MLX5_SET(modify_nic_vport_context_in, in,
489 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
490 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
491 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
492 MLX5_SET(modify_nic_vport_context_in, in,
493 field_select.permanent_address, 1);
494 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
495 nic_vport_context.permanent_address.mac_addr_47_32);
496 ether_addr_copy(mac_ptr, addr);
498 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
504 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Enable RoCE on our own NIC vport.  Returns 0 or -errno. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
/* Disable RoCE on our own NIC vport.  Returns 0 or -errno. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
518 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
519 u8 port_num, u8 vport_num, u32 *out,
522 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
523 int is_group_manager;
525 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
527 memset(in, 0, sizeof(in));
529 MLX5_SET(query_hca_vport_context_in, in, opcode,
530 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
533 if (is_group_manager) {
534 MLX5_SET(query_hca_vport_context_in, in, other_vport,
536 MLX5_SET(query_hca_vport_context_in, in, vport_number,
543 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
544 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
546 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
549 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
550 u64 *system_image_guid)
553 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
556 out = mlx5_vzalloc(outlen);
560 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
564 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
565 hca_vport_context.system_image_guid);
571 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
573 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
576 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
579 out = mlx5_vzalloc(outlen);
583 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
587 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
588 hca_vport_context.node_guid);
594 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
596 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
597 u16 vport_num, u16 gid_index, union ib_gid *gid)
599 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
600 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
601 int is_group_manager;
609 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
610 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
612 if (gid_index > tbsz && gid_index != 0xffff)
615 if (gid_index == 0xffff)
620 out_sz += nout * sizeof(*gid);
622 in = mlx5_vzalloc(in_sz);
623 out = mlx5_vzalloc(out_sz);
629 MLX5_SET(query_hca_vport_gid_in, in, opcode,
630 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
632 if (is_group_manager) {
633 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
635 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
642 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
644 if (MLX5_CAP_GEN(dev, num_ports) == 2)
645 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
647 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
651 err = mlx5_cmd_status_to_err_v2(out);
655 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
656 gid->global.subnet_prefix = tmp->global.subnet_prefix;
657 gid->global.interface_id = tmp->global.interface_id;
664 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
666 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
667 u8 port_num, u16 vf_num, u16 pkey_index,
670 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
671 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
672 int is_group_manager;
681 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
683 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
684 if (pkey_index > tbsz && pkey_index != 0xffff)
687 if (pkey_index == 0xffff)
692 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
694 in = kzalloc(in_sz, GFP_KERNEL);
695 out = kzalloc(out_sz, GFP_KERNEL);
697 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
698 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
700 if (is_group_manager) {
701 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
703 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
709 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
711 if (MLX5_CAP_GEN(dev, num_ports) == 2)
712 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
714 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
718 err = mlx5_cmd_status_to_err_v2(out);
722 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
723 for (i = 0; i < nout; i++, pkey++,
724 pkarr += MLX5_ST_SZ_BYTES(pkey))
725 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
732 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
734 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
735 u16 vport, void *in, int inlen)
737 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
740 memset(out, 0, sizeof(out));
742 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
744 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
746 MLX5_SET(modify_esw_vport_context_in, in, opcode,
747 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
749 err = mlx5_cmd_exec_check_status(mdev, in, inlen,
752 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
757 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
758 u8 insert_mode, u8 strip_mode,
759 u16 vlan, u8 cfi, u8 pcp)
761 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
763 memset(in, 0, sizeof(in));
765 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
766 MLX5_SET(modify_esw_vport_context_in, in,
767 esw_vport_context.cvlan_cfi, cfi);
768 MLX5_SET(modify_esw_vport_context_in, in,
769 esw_vport_context.cvlan_pcp, pcp);
770 MLX5_SET(modify_esw_vport_context_in, in,
771 esw_vport_context.cvlan_id, vlan);
774 MLX5_SET(modify_esw_vport_context_in, in,
775 esw_vport_context.vport_cvlan_insert, insert_mode);
777 MLX5_SET(modify_esw_vport_context_in, in,
778 esw_vport_context.vport_cvlan_strip, strip_mode);
780 MLX5_SET(modify_esw_vport_context_in, in, field_select,
781 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
782 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
784 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
786 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
788 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
789 u8 port_num, u16 vport_num,
790 void *out, int out_size)
792 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
793 int is_group_manager;
797 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
799 in = mlx5_vzalloc(in_sz);
803 MLX5_SET(query_vport_counter_in, in, opcode,
804 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
806 if (is_group_manager) {
807 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
808 MLX5_SET(query_vport_counter_in, in, vport_number,
815 if (MLX5_CAP_GEN(dev, num_ports) == 2)
816 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
818 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
821 err = mlx5_cmd_status_to_err_v2(out);
829 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
831 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
832 struct mlx5_vport_counters *vc)
834 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
838 out = mlx5_vzalloc(out_sz);
842 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
846 vc->received_errors.packets =
847 MLX5_GET64(query_vport_counter_out,
848 out, received_errors.packets);
849 vc->received_errors.octets =
850 MLX5_GET64(query_vport_counter_out,
851 out, received_errors.octets);
852 vc->transmit_errors.packets =
853 MLX5_GET64(query_vport_counter_out,
854 out, transmit_errors.packets);
855 vc->transmit_errors.octets =
856 MLX5_GET64(query_vport_counter_out,
857 out, transmit_errors.octets);
858 vc->received_ib_unicast.packets =
859 MLX5_GET64(query_vport_counter_out,
860 out, received_ib_unicast.packets);
861 vc->received_ib_unicast.octets =
862 MLX5_GET64(query_vport_counter_out,
863 out, received_ib_unicast.octets);
864 vc->transmitted_ib_unicast.packets =
865 MLX5_GET64(query_vport_counter_out,
866 out, transmitted_ib_unicast.packets);
867 vc->transmitted_ib_unicast.octets =
868 MLX5_GET64(query_vport_counter_out,
869 out, transmitted_ib_unicast.octets);
870 vc->received_ib_multicast.packets =
871 MLX5_GET64(query_vport_counter_out,
872 out, received_ib_multicast.packets);
873 vc->received_ib_multicast.octets =
874 MLX5_GET64(query_vport_counter_out,
875 out, received_ib_multicast.octets);
876 vc->transmitted_ib_multicast.packets =
877 MLX5_GET64(query_vport_counter_out,
878 out, transmitted_ib_multicast.packets);
879 vc->transmitted_ib_multicast.octets =
880 MLX5_GET64(query_vport_counter_out,
881 out, transmitted_ib_multicast.octets);
882 vc->received_eth_broadcast.packets =
883 MLX5_GET64(query_vport_counter_out,
884 out, received_eth_broadcast.packets);
885 vc->received_eth_broadcast.octets =
886 MLX5_GET64(query_vport_counter_out,
887 out, received_eth_broadcast.octets);
888 vc->transmitted_eth_broadcast.packets =
889 MLX5_GET64(query_vport_counter_out,
890 out, transmitted_eth_broadcast.packets);
891 vc->transmitted_eth_broadcast.octets =
892 MLX5_GET64(query_vport_counter_out,
893 out, transmitted_eth_broadcast.octets);
894 vc->received_eth_unicast.octets =
895 MLX5_GET64(query_vport_counter_out,
896 out, received_eth_unicast.octets);
897 vc->received_eth_unicast.packets =
898 MLX5_GET64(query_vport_counter_out,
899 out, received_eth_unicast.packets);
900 vc->transmitted_eth_unicast.octets =
901 MLX5_GET64(query_vport_counter_out,
902 out, transmitted_eth_unicast.octets);
903 vc->transmitted_eth_unicast.packets =
904 MLX5_GET64(query_vport_counter_out,
905 out, transmitted_eth_unicast.packets);
906 vc->received_eth_multicast.octets =
907 MLX5_GET64(query_vport_counter_out,
908 out, received_eth_multicast.octets);
909 vc->received_eth_multicast.packets =
910 MLX5_GET64(query_vport_counter_out,
911 out, received_eth_multicast.packets);
912 vc->transmitted_eth_multicast.octets =
913 MLX5_GET64(query_vport_counter_out,
914 out, transmitted_eth_multicast.octets);
915 vc->transmitted_eth_multicast.packets =
916 MLX5_GET64(query_vport_counter_out,
917 out, transmitted_eth_multicast.packets);