2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
/*
 * Query a vport's state via the QUERY_VPORT_STATE firmware command and
 * return the "state" field from the command output; opmod selects which
 * vport flavor is queried.
 * NOTE(review): this chunk is gap-sampled (embedded line numbers are
 * non-contiguous) — declarations, error checks and braces between the
 * shown lines are elided throughout the file.
 */
33 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
35 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
36 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
39 memset(in, 0, sizeof(in));
41 MLX5_SET(query_vport_state_in, in, opcode,
42 MLX5_CMD_OP_QUERY_VPORT_STATE);
43 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
/* Warn on failure but still return whatever is in the (zeroed) out buffer. */
45 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
48 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
50 return MLX5_GET(query_vport_state_out, out, state);
52 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
/*
 * Issue QUERY_NIC_VPORT_CONTEXT for the given vport into caller-supplied
 * out/outlen. Sets other_vport for non-self queries — presumably guarded
 * by an if (vport) on an elided line; TODO confirm against full source.
 */
54 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
57 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
59 memset(in, 0, sizeof(in));
61 MLX5_SET(query_nic_vport_context_in, in, opcode,
62 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
64 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
66 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
68 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
/*
 * Allocate a firmware queue counter set (ALLOC_Q_COUNTER) and return its
 * id through *counter_set_id on success.
 */
71 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
73 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
74 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
77 memset(in, 0, sizeof(in));
78 memset(out, 0, sizeof(out));
80 MLX5_SET(alloc_q_counter_in, in, opcode,
81 MLX5_CMD_OP_ALLOC_Q_COUNTER);
83 err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
/* Extract the assigned counter_set_id from the command output. */
89 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
/*
 * Free a previously allocated queue counter set via DEALLOC_Q_COUNTER.
 */
94 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
97 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
98 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
100 memset(in, 0, sizeof(in));
101 memset(out, 0, sizeof(out));
103 MLX5_SET(dealloc_q_counter_in, in, opcode,
104 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
105 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
108 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
/*
 * Read a queue counter set (QUERY_Q_COUNTER) into caller-supplied output;
 * "reset" maps to the command's clear bit (read-and-clear when nonzero).
 */
112 static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
118 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
120 memset(in, 0, sizeof(in));
122 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
123 MLX5_SET(query_q_counter_in, in, clear, reset);
124 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
126 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
/*
 * Convenience wrapper: query a counter set (without clearing — third arg 0)
 * and extract only the out_of_buffer drop counter into *out_of_rx_buffer.
 */
130 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
132 u32 *out_of_rx_buffer)
134 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
137 memset(out, 0, sizeof(out));
139 err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
145 *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
/*
 * Read the permanent MAC address of a NIC vport into addr[ETH_ALEN].
 * Allocates a full query_nic_vport_context_out buffer, queries, then
 * copies from offset +2 of the permanent_address field — presumably the
 * 2-byte pad before mac_addr_47_32 in the PRM layout; TODO confirm.
 */
150 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
154 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
158 out = mlx5_vzalloc(outlen);
162 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
163 nic_vport_context.permanent_address);
165 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
169 ether_addr_copy(addr, &out_addr[2]);
175 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
/*
 * Query vport 0's NIC vport context and return its system_image_guid.
 */
177 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
178 u64 *system_image_guid)
181 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
184 out = mlx5_vzalloc(outlen);
188 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
192 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
193 nic_vport_context.system_image_guid);
198 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
/*
 * Query vport 0's NIC vport context and return its node_guid.
 */
200 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
203 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
206 out = mlx5_vzalloc(outlen);
210 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
214 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
215 nic_vport_context.node_guid);
221 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
/*
 * Query vport 0's NIC vport context and return its port_guid.
 */
223 int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
226 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
229 out = mlx5_vzalloc(outlen);
233 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
237 *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
238 nic_vport_context.port_guid);
244 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
/*
 * Query vport 0's NIC vport context and return its Q_Key violation
 * counter.
 */
246 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
250 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
253 out = mlx5_vzalloc(outlen);
257 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
261 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
262 nic_vport_context.qkey_violation_counter);
268 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
/*
 * Stamp the MODIFY_NIC_VPORT_CONTEXT opcode into a caller-built "in"
 * mailbox and execute it. Callers fill field_select/vport fields first.
 */
270 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
273 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
275 MLX5_SET(modify_nic_vport_context_in, in, opcode,
276 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
278 memset(out, 0, sizeof(out));
279 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
/*
 * Toggle the roce_en bit of the local NIC vport context (field_select
 * selects only roce_en). Second parameter (elided here) carries the
 * enable/disable value written on the gapped line after 296.
 */
282 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
286 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
289 in = mlx5_vzalloc(inlen);
291 mlx5_core_warn(mdev, "failed to allocate inbox\n");
295 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
296 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
299 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
/*
 * Program a single-entry current (unicast) MAC allowed-list on a vport:
 * allowed_list_type = UC, allowed_list_size = 1, and the MAC copied into
 * the first mac_address_layout entry. inlen reserves room for that one
 * extra list entry past the base modify_nic_vport_context_in struct.
 */
306 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
307 bool other_vport, u8 *addr)
310 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
311 + MLX5_ST_SZ_BYTES(mac_address_layout);
316 in = mlx5_vzalloc(inlen);
318 mlx5_core_warn(mdev, "failed to allocate inbox\n");
322 MLX5_SET(modify_nic_vport_context_in, in,
323 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
324 MLX5_SET(modify_nic_vport_context_in, in,
325 vport_number, vport);
326 MLX5_SET(modify_nic_vport_context_in, in,
327 other_vport, other_vport);
328 MLX5_SET(modify_nic_vport_context_in, in,
329 field_select.addresses_list, 1);
330 MLX5_SET(modify_nic_vport_context_in, in,
331 nic_vport_context.allowed_list_type,
332 MLX5_NIC_VPORT_LIST_TYPE_UC);
333 MLX5_SET(modify_nic_vport_context_in, in,
334 nic_vport_context.allowed_list_size, 1);
/* Point at entry 0 of the list, then at its mac_addr_47_32 sub-field. */
336 mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
337 nic_vport_context.current_uc_mac_address);
338 mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
340 ether_addr_copy(mac_ptr, addr);
342 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
348 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
/*
 * Program a VLAN allowed-list of list_len entries on a vport. Rejects
 * lists longer than the device's log_max_vlan_list capability.
 * NOTE(review): VLAN entries are written through the
 * current_uc_mac_address[] accessor — presumably the allowed-list storage
 * is a union shared by MAC and VLAN layouts in the PRM; verify against
 * the mlx5_ifc structure definitions.
 */
350 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
351 u16 *vlan_list, int list_len)
355 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
356 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
358 int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
360 if (list_len > max_list_size) {
361 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
362 list_len, max_list_size);
366 in = mlx5_vzalloc(inlen);
368 mlx5_core_warn(dev, "failed to allocate inbox\n");
372 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
374 MLX5_SET(modify_nic_vport_context_in, in,
376 MLX5_SET(modify_nic_vport_context_in, in,
377 field_select.addresses_list, 1);
379 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
381 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
382 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
383 MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
385 for (i = 0; i < list_len; i++) {
386 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
387 current_uc_mac_address[i]);
388 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
391 err = mlx5_modify_nic_vport_context(dev, in, inlen);
396 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
/*
 * Program a multicast MAC allowed-list (addr_list_len entries, each a u64
 * holding a packed MAC) on a vport. Rejects lists exceeding the device's
 * log_max_current_mc_list capability.
 * NOTE(review): entries go through current_uc_mac_address[] even though
 * the list type is MC — same shared-storage layout as the VLAN path;
 * verify against mlx5_ifc. Copying from (u8 *)&addr_list[i] assumes the
 * MAC occupies the first 6 bytes of each u64 — TODO confirm endianness.
 */
398 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
399 u64 *addr_list, size_t addr_list_len)
402 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
403 + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
406 int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
408 if ((int)addr_list_len > max_list_sz) {
409 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
410 (int)addr_list_len, max_list_sz);
414 in = mlx5_vzalloc(inlen);
416 mlx5_core_warn(mdev, "failed to allocate inbox\n");
420 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
422 MLX5_SET(modify_nic_vport_context_in, in,
424 MLX5_SET(modify_nic_vport_context_in, in,
425 field_select.addresses_list, 1);
427 ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
429 MLX5_SET(nic_vport_context, ctx, allowed_list_type,
430 MLX5_NIC_VPORT_LIST_TYPE_MC);
431 MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
433 for (i = 0; i < addr_list_len; i++) {
434 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
435 current_uc_mac_address[i]);
436 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
438 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
441 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
447 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
/*
 * Set the promiscuous-mode bits (UC/MC/all) of a vport's NIC context.
 * The three MLX5_SET(..., 1) writes are presumably each guarded by an
 * if (promisc_*) on the elided preceding lines — TODO confirm; as shown
 * they would unconditionally enable all three modes.
 */
449 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
450 bool promisc_mc, bool promisc_uc,
453 u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
454 u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
457 memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
459 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
461 MLX5_SET(modify_nic_vport_context_in, in,
463 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
465 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
467 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
469 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
471 return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
473 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
/*
 * Read a vport's UC or MC MAC allowed-list into addr_list[][ETH_ALEN].
 * *list_size is in/out: capped to the device capability on entry and set
 * to the actual list size reported by firmware on return.
 * NOTE(review): out_sz is sized from modify_nic_vport_context_in rather
 * than query_nic_vport_context_out — works only if the structs have the
 * same size up to the list; verify against mlx5_ifc.
 */
475 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
477 enum mlx5_list_type list_type,
478 u8 addr_list[][ETH_ALEN],
481 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
491 req_list_size = *list_size;
493 max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
494 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
495 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
497 if (req_list_size > max_list_size) {
498 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
499 req_list_size, max_list_size);
500 req_list_size = max_list_size;
503 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
504 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
506 memset(in, 0, sizeof(in));
507 out = kzalloc(out_sz, GFP_KERNEL);
511 MLX5_SET(query_nic_vport_context_in, in, opcode,
512 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
513 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
514 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
517 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
519 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
523 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
525 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
/* +2 skips the pad bytes before the 6-byte MAC within each entry. */
528 *list_size = req_list_size;
529 for (i = 0; i < req_list_size; i++) {
530 mac_addr = MLX5_ADDR_OF(nic_vport_context,
532 current_uc_mac_address[i]) + 2;
533 ether_addr_copy(addr_list[i], mac_addr);
539 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
/*
 * Replace a vport's UC or MC MAC allowed-list with list_size entries from
 * addr_list. Fails (on an elided line after 558) when list_size exceeds
 * the device's capability for the chosen list type.
 */
541 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
542 enum mlx5_list_type list_type,
543 u8 addr_list[][ETH_ALEN],
546 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
554 max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
555 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
556 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
558 if (list_size > max_list_size)
561 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
562 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
564 memset(out, 0, sizeof(out));
565 in = kzalloc(in_sz, GFP_KERNEL);
569 MLX5_SET(modify_nic_vport_context_in, in, opcode,
570 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
571 MLX5_SET(modify_nic_vport_context_in, in,
572 field_select.addresses_list, 1);
574 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
577 MLX5_SET(nic_vport_context, nic_vport_ctx,
578 allowed_list_type, list_type);
579 MLX5_SET(nic_vport_context, nic_vport_ctx,
580 allowed_list_size, list_size);
/* +2 skips the pad bytes before the 6-byte MAC within each entry. */
582 for (i = 0; i < list_size; i++) {
583 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
585 current_uc_mac_address[i]) + 2;
586 ether_addr_copy(curr_mac, addr_list[i]);
589 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
593 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
/*
 * Read a vport's VLAN allowed-list into vlan_list. *list_size is in/out,
 * capped to log_max_vlan_list and updated with the firmware-reported size.
 * NOTE(review): out_sz is sized from modify_nic_vport_context_in (see the
 * same quirk in the MAC-list query); VLAN entries are read through the
 * shared current_uc_mac_address[] storage.
 */
595 int mlx5_query_nic_vport_vlan_list(struct mlx5_core_dev *dev,
600 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
610 req_list_size = *list_size;
612 max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
614 if (req_list_size > max_list_size) {
615 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
616 req_list_size, max_list_size);
617 req_list_size = max_list_size;
620 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
621 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
623 memset(in, 0, sizeof(in));
624 out = kzalloc(out_sz, GFP_KERNEL);
628 MLX5_SET(query_nic_vport_context_in, in, opcode,
629 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
630 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
631 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
632 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
635 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
637 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
641 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
643 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
646 *list_size = req_list_size;
647 for (i = 0; i < req_list_size; i++) {
648 vlan_addr = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
649 current_uc_mac_address[i]);
650 vlan_list[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
656 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlan_list);
/*
 * Replace a vport's VLAN allowed-list with list_size entries from vlans.
 * Fails (elided line after 672) when list_size exceeds log_max_vlan_list.
 * VLAN entries are written through the shared current_uc_mac_address[]
 * allowed-list storage.
 */
658 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
662 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
670 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
672 if (list_size > max_list_size)
675 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
676 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
678 memset(out, 0, sizeof(out));
679 in = kzalloc(in_sz, GFP_KERNEL);
683 MLX5_SET(modify_nic_vport_context_in, in, opcode,
684 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
685 MLX5_SET(modify_nic_vport_context_in, in,
686 field_select.addresses_list, 1);
688 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
691 MLX5_SET(nic_vport_context, nic_vport_ctx,
692 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
693 MLX5_SET(nic_vport_context, nic_vport_ctx,
694 allowed_list_size, list_size);
696 for (i = 0; i < list_size; i++) {
697 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
699 current_uc_mac_address[i]);
700 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
703 err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
707 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
/*
 * Write the permanent MAC address of another vport (other_vport is set
 * unconditionally here). Copies addr into the mac_addr_47_32 sub-field of
 * permanent_address and selects only that field for modification.
 */
709 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
713 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
717 in = mlx5_vzalloc(inlen);
719 mlx5_core_warn(mdev, "failed to allocate inbox\n");
723 MLX5_SET(modify_nic_vport_context_in, in,
724 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
725 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
726 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
727 MLX5_SET(modify_nic_vport_context_in, in,
728 field_select.permanent_address, 1);
729 mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
730 nic_vport_context.permanent_address.mac_addr_47_32);
731 ether_addr_copy(mac_ptr, addr);
733 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
739 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
/* Enable RoCE on the local NIC vport (roce_en = 1). */
741 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
743 return mlx5_nic_vport_enable_disable_roce(mdev, 1);
745 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
/* Disable RoCE on the local NIC vport (roce_en = 0). */
747 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
749 return mlx5_nic_vport_disable_roce == NULL ? 0 : mlx5_nic_vport_enable_disable_roce(mdev, 0);
751 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
/*
 * Issue QUERY_HCA_VPORT_CONTEXT for (port_num, vport_num) into out/outlen.
 * other_vport/vport_number are set only when this function is the vport
 * group manager; port_num is set only on dual-port devices.
 */
753 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
754 u8 port_num, u8 vport_num, u32 *out,
757 u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
758 int is_group_manager;
760 is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
762 memset(in, 0, sizeof(in));
764 MLX5_SET(query_hca_vport_context_in, in, opcode,
765 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
768 if (is_group_manager) {
769 MLX5_SET(query_hca_vport_context_in, in, other_vport,
771 MLX5_SET(query_hca_vport_context_in, in, vport_number,
778 if (MLX5_CAP_GEN(mdev, num_ports) == 2)
779 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
781 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
/*
 * Query port 1 / vport 0 HCA vport context and return its
 * system_image_guid.
 */
784 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
785 u64 *system_image_guid)
788 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
791 out = mlx5_vzalloc(outlen);
795 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
799 *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
800 hca_vport_context.system_image_guid);
806 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
/*
 * Query port 1 / vport 0 HCA vport context and return its node_guid.
 */
808 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
811 int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
814 out = mlx5_vzalloc(outlen);
818 err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
822 *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
823 hca_vport_context.node_guid);
829 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
/*
 * Query one GID (or, for gid_index 0xffff, presumably the whole table —
 * the branch bodies are elided) from an HCA vport's GID table.
 * out_sz grows by nout * sizeof(union ib_gid) to hold the entries; the
 * subnet prefix and interface id of the first returned GID are copied
 * into *gid. Uses mlx5_cmd_exec + mlx5_cmd_status_to_err_v2 rather than
 * the _check_status wrapper.
 */
831 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
832 u16 vport_num, u16 gid_index, union ib_gid *gid)
834 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
835 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
836 int is_group_manager;
844 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
845 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
/* Reject out-of-range indices; 0xffff is a special "all entries" value. */
847 if (gid_index > tbsz && gid_index != 0xffff)
850 if (gid_index == 0xffff)
855 out_sz += nout * sizeof(*gid);
857 in = mlx5_vzalloc(in_sz);
858 out = mlx5_vzalloc(out_sz);
864 MLX5_SET(query_hca_vport_gid_in, in, opcode,
865 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
867 if (is_group_manager) {
868 MLX5_SET(query_hca_vport_gid_in, in, vport_number,
870 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
877 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
879 if (MLX5_CAP_GEN(dev, num_ports) == 2)
880 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
882 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
886 err = mlx5_cmd_status_to_err_v2(out);
890 tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
891 gid->global.subnet_prefix = tmp->global.subnet_prefix;
892 gid->global.interface_id = tmp->global.interface_id;
899 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
/*
 * Query one P_Key (or, for pkey_index 0xffff, presumably the whole table
 * — branch bodies elided) from an HCA vport's P_Key table and copy nout
 * entries into the caller's pkey array.
 * NOTE(review): in/out from kzalloc are used without visible NULL checks
 * — probably on elided lines; confirm against full source.
 */
901 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
902 u8 port_num, u16 vf_num, u16 pkey_index,
905 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
906 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
907 int is_group_manager;
916 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
918 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
919 if (pkey_index > tbsz && pkey_index != 0xffff)
922 if (pkey_index == 0xffff)
927 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
929 in = kzalloc(in_sz, GFP_KERNEL);
930 out = kzalloc(out_sz, GFP_KERNEL);
932 MLX5_SET(query_hca_vport_pkey_in, in, opcode,
933 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
935 if (is_group_manager) {
936 MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
938 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
944 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
946 if (MLX5_CAP_GEN(dev, num_ports) == 2)
947 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
949 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
953 err = mlx5_cmd_status_to_err_v2(out);
/* Walk the packed pkey array in the output, one entry per iteration. */
957 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
958 for (i = 0; i < nout; i++, pkey++,
959 pkarr += MLX5_ST_SZ_BYTES(pkey))
960 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
967 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
/*
 * Execute MODIFY_ESW_VPORT_CONTEXT on a caller-built mailbox for the
 * given vport; other_vport is presumably guarded by an elided if (vport).
 * Warns on failure and returns the command status.
 */
969 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
970 u16 vport, void *in, int inlen)
972 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
975 memset(out, 0, sizeof(out));
977 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
979 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
981 MLX5_SET(modify_esw_vport_context_in, in, opcode,
982 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
984 err = mlx5_cmd_exec_check_status(mdev, in, inlen,
987 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
/*
 * Configure e-switch C-VLAN insert/strip behavior for a vport. When
 * insertion is enabled, the VLAN id/cfi/pcp to insert are programmed;
 * field_select covers both CVLAN_STRIP and CVLAN_INSERT.
 */
992 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
993 u8 insert_mode, u8 strip_mode,
994 u16 vlan, u8 cfi, u8 pcp)
996 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
998 memset(in, 0, sizeof(in));
1000 if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1001 MLX5_SET(modify_esw_vport_context_in, in,
1002 esw_vport_context.cvlan_cfi, cfi);
1003 MLX5_SET(modify_esw_vport_context_in, in,
1004 esw_vport_context.cvlan_pcp, pcp);
1005 MLX5_SET(modify_esw_vport_context_in, in,
1006 esw_vport_context.cvlan_id, vlan);
1009 MLX5_SET(modify_esw_vport_context_in, in,
1010 esw_vport_context.vport_cvlan_insert, insert_mode);
1012 MLX5_SET(modify_esw_vport_context_in, in,
1013 esw_vport_context.vport_cvlan_strip, strip_mode);
1015 MLX5_SET(modify_esw_vport_context_in, in, field_select,
1016 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1017 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1019 return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1021 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
/*
 * Arm asynchronous change events on a vport's NIC context: sets
 * arm_change_event and, per bits in events_mask, the individual
 * event_on_* triggers (UC/MC address, VLAN, promisc, MTU changes).
 * other_vport is presumably guarded by an elided if (vport).
 */
1023 int mlx5_arm_vport_context_events(struct mlx5_core_dev *mdev,
1028 u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1029 void *nic_vport_ctx;
1032 in = mlx5_vzalloc(inlen);
1036 MLX5_SET(modify_nic_vport_context_in,
1039 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1040 MLX5_SET(modify_nic_vport_context_in,
1042 field_select.change_event,
1044 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1046 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1047 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
1051 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
1053 if (events_mask & MLX5_UC_ADDR_CHANGE)
1054 MLX5_SET(nic_vport_context,
1056 event_on_uc_address_change,
1058 if (events_mask & MLX5_MC_ADDR_CHANGE)
1059 MLX5_SET(nic_vport_context,
1061 event_on_mc_address_change,
1063 if (events_mask & MLX5_VLAN_CHANGE)
1064 MLX5_SET(nic_vport_context,
1066 event_on_vlan_change,
1068 if (events_mask & MLX5_PROMISC_CHANGE)
1069 MLX5_SET(nic_vport_context,
1071 event_on_promisc_change,
1073 if (events_mask & MLX5_MTU_CHANGE)
1074 MLX5_SET(nic_vport_context,
1079 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1084 EXPORT_SYMBOL_GPL(mlx5_arm_vport_context_events);
/*
 * Read a vport's promiscuous-mode bits (UC/MC/all) from its NIC context.
 * NOTE(review): the function is defined as mlx5_query_vport_promisc but
 * exported as mlx5_query_nic_vport_promisc — one of the two names is
 * likely a typo; check the matching declaration in the vport header.
 */
1086 int mlx5_query_vport_promisc(struct mlx5_core_dev *mdev,
1093 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1096 out = kzalloc(outlen, GFP_KERNEL);
1100 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1104 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1105 nic_vport_context.promisc_uc);
1106 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1107 nic_vport_context.promisc_mc);
1108 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1109 nic_vport_context.promisc_all);
1115 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
/*
 * Write a vport's promiscuous-mode bits (UC/MC/all) into its NIC context,
 * selecting only the promisc field for modification.
 */
1117 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1123 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1126 in = mlx5_vzalloc(inlen);
1128 mlx5_core_err(mdev, "failed to allocate inbox\n");
1132 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1133 MLX5_SET(modify_nic_vport_context_in, in,
1134 nic_vport_context.promisc_uc, promisc_uc);
1135 MLX5_SET(modify_nic_vport_context_in, in,
1136 nic_vport_context.promisc_mc, promisc_mc);
1137 MLX5_SET(modify_nic_vport_context_in, in,
1138 nic_vport_context.promisc_all, promisc_all);
1140 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1144 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
/*
 * Issue QUERY_VPORT_COUNTER for (port_num, vport_num) into the caller's
 * out buffer. other_vport/vport_number are set only when the device is
 * the vport group manager; port_num only on dual-port devices. Uses
 * mlx5_cmd_exec + mlx5_cmd_status_to_err_v2 explicitly.
 */
1146 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1147 u8 port_num, u16 vport_num,
1148 void *out, int out_size)
1150 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1151 int is_group_manager;
1155 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1157 in = mlx5_vzalloc(in_sz);
1161 MLX5_SET(query_vport_counter_in, in, opcode,
1162 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1164 if (is_group_manager) {
1165 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1166 MLX5_SET(query_vport_counter_in, in, vport_number,
1173 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1174 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1176 err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
1179 err = mlx5_cmd_status_to_err_v2(out);
1187 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1189 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1190 struct mlx5_vport_counters *vc)
1192 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1196 out = mlx5_vzalloc(out_sz);
1200 err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1204 vc->received_errors.packets =
1205 MLX5_GET64(query_vport_counter_out,
1206 out, received_errors.packets);
1207 vc->received_errors.octets =
1208 MLX5_GET64(query_vport_counter_out,
1209 out, received_errors.octets);
1210 vc->transmit_errors.packets =
1211 MLX5_GET64(query_vport_counter_out,
1212 out, transmit_errors.packets);
1213 vc->transmit_errors.octets =
1214 MLX5_GET64(query_vport_counter_out,
1215 out, transmit_errors.octets);
1216 vc->received_ib_unicast.packets =
1217 MLX5_GET64(query_vport_counter_out,
1218 out, received_ib_unicast.packets);
1219 vc->received_ib_unicast.octets =
1220 MLX5_GET64(query_vport_counter_out,
1221 out, received_ib_unicast.octets);
1222 vc->transmitted_ib_unicast.packets =
1223 MLX5_GET64(query_vport_counter_out,
1224 out, transmitted_ib_unicast.packets);
1225 vc->transmitted_ib_unicast.octets =
1226 MLX5_GET64(query_vport_counter_out,
1227 out, transmitted_ib_unicast.octets);
1228 vc->received_ib_multicast.packets =
1229 MLX5_GET64(query_vport_counter_out,
1230 out, received_ib_multicast.packets);
1231 vc->received_ib_multicast.octets =
1232 MLX5_GET64(query_vport_counter_out,
1233 out, received_ib_multicast.octets);
1234 vc->transmitted_ib_multicast.packets =
1235 MLX5_GET64(query_vport_counter_out,
1236 out, transmitted_ib_multicast.packets);
1237 vc->transmitted_ib_multicast.octets =
1238 MLX5_GET64(query_vport_counter_out,
1239 out, transmitted_ib_multicast.octets);
1240 vc->received_eth_broadcast.packets =
1241 MLX5_GET64(query_vport_counter_out,
1242 out, received_eth_broadcast.packets);
1243 vc->received_eth_broadcast.octets =
1244 MLX5_GET64(query_vport_counter_out,
1245 out, received_eth_broadcast.octets);
1246 vc->transmitted_eth_broadcast.packets =
1247 MLX5_GET64(query_vport_counter_out,
1248 out, transmitted_eth_broadcast.packets);
1249 vc->transmitted_eth_broadcast.octets =
1250 MLX5_GET64(query_vport_counter_out,
1251 out, transmitted_eth_broadcast.octets);
1252 vc->received_eth_unicast.octets =
1253 MLX5_GET64(query_vport_counter_out,
1254 out, received_eth_unicast.octets);
1255 vc->received_eth_unicast.packets =
1256 MLX5_GET64(query_vport_counter_out,
1257 out, received_eth_unicast.packets);
1258 vc->transmitted_eth_unicast.octets =
1259 MLX5_GET64(query_vport_counter_out,
1260 out, transmitted_eth_unicast.octets);
1261 vc->transmitted_eth_unicast.packets =
1262 MLX5_GET64(query_vport_counter_out,
1263 out, transmitted_eth_unicast.packets);
1264 vc->received_eth_multicast.octets =
1265 MLX5_GET64(query_vport_counter_out,
1266 out, received_eth_multicast.octets);
1267 vc->received_eth_multicast.packets =
1268 MLX5_GET64(query_vport_counter_out,
1269 out, received_eth_multicast.packets);
1270 vc->transmitted_eth_multicast.octets =
1271 MLX5_GET64(query_vport_counter_out,
1272 out, transmitted_eth_multicast.octets);
1273 vc->transmitted_eth_multicast.packets =
1274 MLX5_GET64(query_vport_counter_out,
1275 out, transmitted_eth_multicast.packets);