1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
32
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
34                                          int inlen);
35
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37                                    u16 vport, u32 *out, int outlen)
38 {
39         int err;
40         u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
41
42         MLX5_SET(query_vport_state_in, in, opcode,
43                  MLX5_CMD_OP_QUERY_VPORT_STATE);
44         MLX5_SET(query_vport_state_in, in, op_mod, opmod);
45         MLX5_SET(query_vport_state_in, in, vport_number, vport);
46         if (vport)
47                 MLX5_SET(query_vport_state_in, in, other_vport, 1);
48
49         err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
50         if (err)
51                 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
52
53         return err;
54 }
55
56 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
57 {
58         u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
59
60         _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
61
62         return MLX5_GET(query_vport_state_out, out, state);
63 }
64 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
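/*
 * Illustrative use (not part of this file): a netdev driver checking the
 * link of its own NIC vport would typically do
 *
 *      u8 state = mlx5_query_vport_state(mdev, opmod, 0);
 *
 * where vport 0 addresses the caller's own function and opmod selects the
 * vport type per the QUERY_VPORT_STATE op_mod definitions in mlx5_ifc.h.
 * Command failures are only warned about here, so on error the state is
 * read from the zero-initialized output buffer.
 */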
65
66 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
67 {
68         u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
69
70         _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
71
72         return MLX5_GET(query_vport_state_out, out, admin_state);
73 }
74 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
75
76 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
77                                   u16 vport, u8 state)
78 {
79         u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
80         u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
81         int err;
82
83         MLX5_SET(modify_vport_state_in, in, opcode,
84                  MLX5_CMD_OP_MODIFY_VPORT_STATE);
85         MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
86         MLX5_SET(modify_vport_state_in, in, vport_number, vport);
87
88         if (vport)
89                 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
90
91         MLX5_SET(modify_vport_state_in, in, admin_state, state);
92
93         err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
94         if (err)
95                 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
96
97         return err;
98 }
99 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
100
101 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
102                                         u32 *out, int outlen)
103 {
104         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
105
106         MLX5_SET(query_nic_vport_context_in, in, opcode,
107                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
108
109         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
110         if (vport)
111                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
112
113         return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
114 }
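/*
 * The NIC vport helpers below all follow the pattern of the query above:
 * zero a command buffer sized with MLX5_ST_SZ_DW()/MLX5_ST_SZ_BYTES(),
 * fill the opcode and vport_number with MLX5_SET(), set other_vport only
 * when a vport other than the caller's own (vport 0) is addressed, and
 * push the command through mlx5_cmd_exec().
 */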
115
116 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
117                                               int client_id)
118 {
119         switch (client_id) {
120         case MLX5_INTERFACE_PROTOCOL_IB:
121                 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
122                         MLX5_QCOUNTER_SETS_NETDEV);
123         case MLX5_INTERFACE_PROTOCOL_ETH:
124                 return MLX5_QCOUNTER_SETS_NETDEV;
125         default:
126                 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
127                 return 0;
128         }
129 }
130
131 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
132                                int client_id, u16 *counter_set_id)
133 {
134         u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
135         u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
136         int err;
137
138         if (mdev->num_q_counter_allocated[client_id] >
139             mlx5_vport_max_q_counter_allocator(mdev, client_id))
140                 return -EINVAL;
141
142         MLX5_SET(alloc_q_counter_in, in, opcode,
143                  MLX5_CMD_OP_ALLOC_Q_COUNTER);
144
145         err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
146
147         if (!err)
148                 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
149                                            counter_set_id);
150
151         mdev->num_q_counter_allocated[client_id]++;
152
153         return err;
154 }
155
156 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
157                                  int client_id, u16 counter_set_id)
158 {
159         u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
160         u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
161         int err;
162
163         if (mdev->num_q_counter_allocated[client_id] <= 0)
164                 return -EINVAL;
165
166         MLX5_SET(dealloc_q_counter_in, in, opcode,
167                  MLX5_CMD_OP_DEALLOC_Q_COUNTER);
168         MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
169                  counter_set_id);
170
171         err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
172
173         mdev->num_q_counter_allocated[client_id]--;
174
175         return err;
176 }
177
178 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
179                                       u16 counter_set_id,
180                                       int reset,
181                                       void *out,
182                                       int out_size)
183 {
184         u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
185
186         MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
187         MLX5_SET(query_q_counter_in, in, clear, reset);
188         MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
189
190         return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
191 }
192
193 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
194                                       u16 counter_set_id,
195                                       u32 *out_of_rx_buffer)
196 {
197         u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
198         int err;
199
200         err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
201                                          sizeof(out));
202
203         if (err)
204                 return err;
205
206         *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
207                                      out_of_buffer);
208         return err;
209 }
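/*
 * Sketch of how the queue-counter helpers above pair up (illustrative
 * only, not taken from a real caller):
 *
 *      u16 set_id;
 *      u32 dropped;
 *
 *      if (mlx5_vport_alloc_q_counter(mdev, MLX5_INTERFACE_PROTOCOL_ETH,
 *          &set_id) == 0) {
 *              mlx5_vport_query_out_of_rx_buffer(mdev, set_id, &dropped);
 *              mlx5_vport_dealloc_q_counter(mdev,
 *                  MLX5_INTERFACE_PROTOCOL_ETH, set_id);
 *      }
 *
 * mdev->num_q_counter_allocated[] rations the firmware counter sets
 * between the IB and Ethernet clients according to the split computed by
 * mlx5_vport_max_q_counter_allocator().
 */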
210
211 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
212                                     u16 vport, u8 *min_inline)
213 {
214         u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
215         int err;
216
217         err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
218         if (!err)
219                 *min_inline = MLX5_GET(query_nic_vport_context_out, out,
220                                        nic_vport_context.min_wqe_inline_mode);
221         return err;
222 }
223 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
224
225 void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
226                            u8 *min_inline_mode)
227 {
228         switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
229         case MLX5_CAP_INLINE_MODE_L2:
230                 *min_inline_mode = MLX5_INLINE_MODE_L2;
231                 break;
232         case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
233                 mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
234                 break;
235         case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
236                 *min_inline_mode = MLX5_INLINE_MODE_NONE;
237                 break;
238         }
239 }
240 EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
241
242 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
243                                      u16 vport, u8 min_inline)
244 {
245         u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
246         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
247         void *nic_vport_ctx;
248
249         MLX5_SET(modify_nic_vport_context_in, in,
250                  field_select.min_wqe_inline_mode, 1);
251         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
252         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
253
254         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
255                                      in, nic_vport_context);
256         MLX5_SET(nic_vport_context, nic_vport_ctx,
257                  min_wqe_inline_mode, min_inline);
258
259         return mlx5_modify_nic_vport_context(mdev, in, inlen);
260 }
261 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);
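/*
 * mlx5_query_min_inline() resolves the minimum TX inline mode for the
 * local port: either a fixed requirement reported directly by the ETH
 * capabilities (L2 or none), or, when the capability says VPORT_CONTEXT,
 * the per-vport value stored in the NIC vport context, which can be
 * changed with mlx5_modify_nic_vport_min_inline() above.
 */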
262
263 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
264                                      u16 vport, u8 *addr)
265 {
266         u32 *out;
267         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
268         u8 *out_addr;
269         int err;
270
271         out = mlx5_vzalloc(outlen);
272         if (!out)
273                 return -ENOMEM;
274
275         out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
276                                 nic_vport_context.permanent_address);
277
278         err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
279         if (err)
280                 goto out;
281
282         ether_addr_copy(addr, &out_addr[2]);
283
284 out:
285         kvfree(out);
286         return err;
287 }
288 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
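/*
 * The permanent_address field of the NIC vport context is eight bytes
 * wide with the six-byte MAC right-aligned in it, which is why the query
 * above copies from &out_addr[2] and the modify below writes to
 * &perm_mac[2].
 */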
289
290 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
291                                       u16 vport, u8 *addr)
292 {
293         void *in;
294         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
295         int err;
296         void *nic_vport_ctx;
297         u8 *perm_mac;
298
299         in = mlx5_vzalloc(inlen);
300         if (!in) {
301                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
302                 return -ENOMEM;
303         }
304
305         MLX5_SET(modify_nic_vport_context_in, in,
306                  field_select.permanent_address, 1);
307         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
308
309         if (vport)
310                 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
311
312         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
313                                      in, nic_vport_context);
314         perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
315                                 permanent_address);
316
317         ether_addr_copy(&perm_mac[2], addr);
318
319         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
320
321         kvfree(in);
322
323         return err;
324 }
325 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
326
327 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
328                                            u64 *system_image_guid)
329 {
330         u32 *out;
331         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
332         int err;
333
334         out = mlx5_vzalloc(outlen);
335         if (!out)
336                 return -ENOMEM;
337
338         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
339         if (err)
340                 goto out;
341
342         *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
343                                         nic_vport_context.system_image_guid);
344 out:
345         kvfree(out);
346         return err;
347 }
348 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
349
350 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
351 {
352         u32 *out;
353         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
354         int err;
355
356         out = mlx5_vzalloc(outlen);
357         if (!out)
358                 return -ENOMEM;
359
360         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
361         if (err)
362                 goto out;
363
364         *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
365                                 nic_vport_context.node_guid);
366
367 out:
368         kvfree(out);
369         return err;
370 }
371 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
372
373 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
374                                           u64 *port_guid)
375 {
376         u32 *out;
377         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
378         int err;
379
380         out = mlx5_vzalloc(outlen);
381         if (!out)
382                 return -ENOMEM;
383
384         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
385         if (err)
386                 goto out;
387
388         *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
389                                 nic_vport_context.port_guid);
390
391 out:
392         kvfree(out);
393         return err;
394 }
395
396 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
397                                         u16 *qkey_viol_cntr)
398 {
399         u32 *out;
400         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
401         int err;
402
403         out = mlx5_vzalloc(outlen);
404         if (!out)
405                 return -ENOMEM;
406
407         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
408         if (err)
409                 goto out;
410
411         *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
412                                 nic_vport_context.qkey_violation_counter);
413
414 out:
415         kvfree(out);
416         return err;
417 }
418 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
419
420 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
421                                          int inlen)
422 {
423         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
424
425         MLX5_SET(modify_nic_vport_context_in, in, opcode,
426                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
427
428         return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
429 }
430
431 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
432                                               int enable_disable)
433 {
434         void *in;
435         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
436         int err;
437
438         in = mlx5_vzalloc(inlen);
439         if (!in) {
440                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
441                 return -ENOMEM;
442         }
443
444         MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
445         MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
446                  enable_disable);
447
448         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
449
450         kvfree(in);
451
452         return err;
453 }
454
455 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
456                                    bool other_vport, u8 *addr)
457 {
458         void *in;
459         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
460                   + MLX5_ST_SZ_BYTES(mac_address_layout);
461         u8  *mac_layout;
462         u8  *mac_ptr;
463         int err;
464
465         in = mlx5_vzalloc(inlen);
466         if (!in) {
467                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
468                 return -ENOMEM;
469         }
470
471         MLX5_SET(modify_nic_vport_context_in, in,
472                  opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
473         MLX5_SET(modify_nic_vport_context_in, in,
474                  vport_number, vport);
475         MLX5_SET(modify_nic_vport_context_in, in,
476                  other_vport, other_vport);
477         MLX5_SET(modify_nic_vport_context_in, in,
478                  field_select.addresses_list, 1);
479         MLX5_SET(modify_nic_vport_context_in, in,
480                  nic_vport_context.allowed_list_type,
481                  MLX5_NIC_VPORT_LIST_TYPE_UC);
482         MLX5_SET(modify_nic_vport_context_in, in,
483                  nic_vport_context.allowed_list_size, 1);
484
485         mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
486                 nic_vport_context.current_uc_mac_address);
487         mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
488                 mac_addr_47_32);
489         ether_addr_copy(mac_ptr, addr);
490
491         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
492
493         kvfree(in);
494
495         return err;
496 }
497 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
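/*
 * Illustrative caller (not from this file), e.g. eswitch code setting a
 * VF unicast MAC, where "vf_vport" is a hypothetical vport number:
 *
 *      mlx5_set_nic_vport_current_mac(mdev, vf_vport, true, mac);
 *
 * This programs a one-entry current UC address list for that vport via
 * field_select.addresses_list.
 */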
498
499 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
500                                     u32 vport, u64 node_guid)
501 {
502         void *in;
503         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
504         int err;
505         void *nic_vport_context;
506
507         if (!vport)
508                 return -EINVAL;
509         if (!MLX5_CAP_GEN(mdev, vport_group_manager))
510                 return -EPERM;
511         if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
512                 return -ENOTSUPP;
513
514         in = mlx5_vzalloc(inlen);
515         if (!in) {
516                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
517                 return -ENOMEM;
518         }
519
520         MLX5_SET(modify_nic_vport_context_in, in,
521                  field_select.node_guid, 1);
522         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
523
524         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
525
526         nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
527                                          in, nic_vport_context);
528         MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
529
530         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
531
532         kvfree(in);
533
534         return err;
535 }
536 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
537
538 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
539                                     u32 vport, u64 port_guid)
540 {
541         void *in;
542         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
543         int err;
544         void *nic_vport_context;
545
546         if (!vport)
547                 return -EINVAL;
548         if (!MLX5_CAP_GEN(mdev, vport_group_manager))
549                 return -EPERM;
550         if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
551                 return -ENOTSUPP;
552
553         in = mlx5_vzalloc(inlen);
554         if (!in) {
555                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
556                 return -ENOMEM;
557         }
558
559         MLX5_SET(modify_nic_vport_context_in, in,
560                  field_select.port_guid, 1);
561         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
562
563         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
564
565         nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
566                                          in, nic_vport_context);
567         MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
568
569         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
570
571         kvfree(in);
572
573         return err;
574 }
575 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
576
577 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
578                                  u16 *vlan_list, int list_len)
579 {
580         void *in, *ctx;
581         int i, err;
582         int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
583                 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
584
585         int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
586
587         if (list_len > max_list_size) {
588                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
589                                list_len, max_list_size);
590                 return -ENOSPC;
591         }
592
593         in = mlx5_vzalloc(inlen);
594         if (!in) {
595                 mlx5_core_warn(dev, "failed to allocate inbox\n");
596                 return -ENOMEM;
597         }
598
599         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
600         if (vport)
601                 MLX5_SET(modify_nic_vport_context_in, in,
602                          other_vport, 1);
603         MLX5_SET(modify_nic_vport_context_in, in,
604                  field_select.addresses_list, 1);
605
606         ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
607
608         MLX5_SET(nic_vport_context, ctx, allowed_list_type,
609                  MLX5_NIC_VPORT_LIST_TYPE_VLAN);
610         MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
611
612         for (i = 0; i < list_len; i++) {
613                 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
614                                          current_uc_mac_address[i]);
615                 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
616         }
617
618         err = mlx5_modify_nic_vport_context(dev, in, inlen);
619
620         kvfree(in);
621         return err;
622 }
623 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
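/*
 * Note: the VLAN entries above are written through the
 * current_uc_mac_address[] array of the NIC vport context; the allowed
 * list entries share that storage and are interpreted according to
 * allowed_list_type, here MLX5_NIC_VPORT_LIST_TYPE_VLAN.
 */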
624
625 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
626                                u64 *addr_list, size_t addr_list_len)
627 {
628         void *in, *ctx;
629         int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
630                   + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
631         int err;
632         size_t i;
633         int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
634
635         if ((int)addr_list_len > max_list_sz) {
636                 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
637                                (int)addr_list_len, max_list_sz);
638                 return -ENOSPC;
639         }
640
641         in = mlx5_vzalloc(inlen);
642         if (!in) {
643                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
644                 return -ENOMEM;
645         }
646
647         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
648         if (vport)
649                 MLX5_SET(modify_nic_vport_context_in, in,
650                          other_vport, 1);
651         MLX5_SET(modify_nic_vport_context_in, in,
652                  field_select.addresses_list, 1);
653
654         ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
655
656         MLX5_SET(nic_vport_context, ctx, allowed_list_type,
657                  MLX5_NIC_VPORT_LIST_TYPE_MC);
658         MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
659
660         for (i = 0; i < addr_list_len; i++) {
661                 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
662                                                   current_uc_mac_address[i]);
663                 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
664                                                  mac_addr_47_32);
665                 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
666         }
667
668         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
669
670         kvfree(in);
671
672         return err;
673 }
674 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
675
676 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
677                                bool promisc_mc, bool promisc_uc,
678                                bool promisc_all)
679 {
680         u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
681         u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
682                                nic_vport_context);
683
684         memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
685
686         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
687         if (vport)
688                 MLX5_SET(modify_nic_vport_context_in, in,
689                          other_vport, 1);
690         MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
691         if (promisc_mc)
692                 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
693         if (promisc_uc)
694                 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
695         if (promisc_all)
696                 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
697
698         return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
699 }
700 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
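/*
 * Example (sketch): enabling every promiscuous mode on the local vport
 * is a single call,
 *
 *      mlx5_set_nic_vport_promisc(mdev, 0, true, true, true);
 *
 * which sets promisc_mc, promisc_uc and promisc_all in one modify
 * command.
 */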
701
702 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
703                                   u16 vport,
704                                   enum mlx5_list_type list_type,
705                                   u8 addr_list[][ETH_ALEN],
706                                   int *list_size)
707 {
708         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
709         void *nic_vport_ctx;
710         int max_list_size;
711         int req_list_size;
712         int out_sz;
713         void *out;
714         int err;
715         int i;
716
717         req_list_size = *list_size;
718
719         max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
720                         1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
721                         1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
722
723         if (req_list_size > max_list_size) {
724                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
725                                req_list_size, max_list_size);
726                 req_list_size = max_list_size;
727         }
728
729         out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
730                  req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
731
732         out = kzalloc(out_sz, GFP_KERNEL);
733         if (!out)
734                 return -ENOMEM;
735
736         MLX5_SET(query_nic_vport_context_in, in, opcode,
737                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
738         MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
739         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
740
741         if (vport)
742                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
743
744         err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
745         if (err)
746                 goto out;
747
748         nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
749                                      nic_vport_context);
750         req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
751                                  allowed_list_size);
752
753         *list_size = req_list_size;
754         for (i = 0; i < req_list_size; i++) {
755                 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
756                                         nic_vport_ctx,
757                                         current_uc_mac_address[i]) + 2;
758                 ether_addr_copy(addr_list[i], mac_addr);
759         }
760 out:
761         kfree(out);
762         return err;
763 }
764 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
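/*
 * mlx5_query_nic_vport_mac_list() is the read side of
 * mlx5_modify_nic_vport_mac_list() below: *list_size is an in/out
 * parameter, clamped on entry to the capability-derived maximum and set
 * on return to the allowed_list_size the firmware actually reported.
 */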
765
766 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
767                                    enum mlx5_list_type list_type,
768                                    u8 addr_list[][ETH_ALEN],
769                                    int list_size)
770 {
771         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
772         void *nic_vport_ctx;
773         int max_list_size;
774         int in_sz;
775         void *in;
776         int err;
777         int i;
778
779         max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
780                  1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
781                  1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
782
783         if (list_size > max_list_size)
784                 return -ENOSPC;
785
786         in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
787                 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
788
789         in = kzalloc(in_sz, GFP_KERNEL);
790         if (!in)
791                 return -ENOMEM;
792
793         MLX5_SET(modify_nic_vport_context_in, in, opcode,
794                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
795         MLX5_SET(modify_nic_vport_context_in, in,
796                  field_select.addresses_list, 1);
797
798         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
799                                      nic_vport_context);
800
801         MLX5_SET(nic_vport_context, nic_vport_ctx,
802                  allowed_list_type, list_type);
803         MLX5_SET(nic_vport_context, nic_vport_ctx,
804                  allowed_list_size, list_size);
805
806         for (i = 0; i < list_size; i++) {
807                 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
808                                             nic_vport_ctx,
809                                             current_uc_mac_address[i]) + 2;
810                 ether_addr_copy(curr_mac, addr_list[i]);
811         }
812
813         err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
814         kfree(in);
815         return err;
816 }
817 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
818
819 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
820                                u16 vport,
821                                u16 vlans[],
822                                int *size)
823 {
824         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
825         void *nic_vport_ctx;
826         int req_list_size;
827         int max_list_size;
828         int out_sz;
829         void *out;
830         int err;
831         int i;
832
833         req_list_size = *size;
834         max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
835         if (req_list_size > max_list_size) {
836                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
837                                req_list_size, max_list_size);
838                 req_list_size = max_list_size;
839         }
840
841         out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
842                  req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
843
844         out = kzalloc(out_sz, GFP_KERNEL);
845         if (!out)
846                 return -ENOMEM;
847
848         MLX5_SET(query_nic_vport_context_in, in, opcode,
849                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
850         MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
851                  MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
852         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
853
854         if (vport)
855                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
856
857         err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
858         if (err)
859                 goto out;
860
861         nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
862                                      nic_vport_context);
863         req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
864                                  allowed_list_size);
865
866         *size = req_list_size;
867         for (i = 0; i < req_list_size; i++) {
868                 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
869                                                nic_vport_ctx,
870                                          current_uc_mac_address[i]);
871                 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
872         }
873 out:
874         kfree(out);
875         return err;
876 }
877 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
878
879 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
880                                 u16 vlans[],
881                                 int list_size)
882 {
883         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
884         void *nic_vport_ctx;
885         int max_list_size;
886         int in_sz;
887         void *in;
888         int err;
889         int i;
890
891         max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
892
893         if (list_size > max_list_size)
894                 return -ENOSPC;
895
896         in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
897                 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
898
899         in = kzalloc(in_sz, GFP_KERNEL);
900         if (!in)
901                 return -ENOMEM;
902
903         MLX5_SET(modify_nic_vport_context_in, in, opcode,
904                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
905         MLX5_SET(modify_nic_vport_context_in, in,
906                  field_select.addresses_list, 1);
907
908         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
909                                      nic_vport_context);
910
911         MLX5_SET(nic_vport_context, nic_vport_ctx,
912                  allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
913         MLX5_SET(nic_vport_context, nic_vport_ctx,
914                  allowed_list_size, list_size);
915
916         for (i = 0; i < list_size; i++) {
917                 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
918                                                nic_vport_ctx,
919                                                current_uc_mac_address[i]);
920                 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
921         }
922
923         err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
924         kfree(in);
925         return err;
926 }
927 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
928
929 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
930 {
931         u32 *out;
932         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
933         int err;
934
935         out = kzalloc(outlen, GFP_KERNEL);
936         if (!out)
937                 return -ENOMEM;
938
939         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
940         if (err)
941                 goto out;
942
943         *enable = MLX5_GET(query_nic_vport_context_out, out,
944                                 nic_vport_context.roce_en);
945
946 out:
947         kfree(out);
948         return err;
949 }
950 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
951
952 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
953                                      u8 *addr)
954 {
955         void *in;
956         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
957         u8  *mac_ptr;
958         int err;
959
960         in = mlx5_vzalloc(inlen);
961         if (!in) {
962                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
963                 return -ENOMEM;
964         }
965
966         MLX5_SET(modify_nic_vport_context_in, in,
967                  opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
968         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
969         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
970         MLX5_SET(modify_nic_vport_context_in, in,
971                  field_select.permanent_address, 1);
972         mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
973                 nic_vport_context.permanent_address.mac_addr_47_32);
974         ether_addr_copy(mac_ptr, addr);
975
976         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
977
978         kvfree(in);
979
980         return err;
981 }
982 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
983
984 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
985 {
986         return mlx5_nic_vport_enable_disable_roce(mdev, 1);
987 }
988 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
989
990 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
991 {
992         return mlx5_nic_vport_enable_disable_roce(mdev, 0);
993 }
994 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
995
996 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
997                                   int vf, u8 port_num, void *out,
998                                   size_t out_sz)
999 {
1000         int     in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1001         int     is_group_manager;
1002         void   *in;
1003         int     err;
1004
1005         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1006         in = mlx5_vzalloc(in_sz);
1007         if (!in) {
1008                 err = -ENOMEM;
1009                 return err;
1010         }
1011
1012         MLX5_SET(query_vport_counter_in, in, opcode,
1013                  MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1014         if (other_vport) {
1015                 if (is_group_manager) {
1016                         MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1017                         MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
1018                 } else {
1019                         err = -EPERM;
1020                         goto free;
1021                 }
1022         }
1023         if (MLX5_CAP_GEN(dev, num_ports) == 2)
1024                 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1025
1026         err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
1027 free:
1028         kvfree(in);
1029         return err;
1030 }
1031 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
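/*
 * Reading another vport's counters (other_vport != 0) requires the
 * vport_group_manager capability; the VF index is translated to a vport
 * number as "vf + 1", vport 0 being the caller's own function.
 */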
1032
1033 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
1034                                  u8 port_num, u8 vport_num, u32 *out,
1035                                  int outlen)
1036 {
1037         u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
1038         int is_group_manager;
1039
1040         is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
1041
1042         MLX5_SET(query_hca_vport_context_in, in, opcode,
1043                  MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
1044
1045         if (vport_num) {
1046                 if (is_group_manager) {
1047                         MLX5_SET(query_hca_vport_context_in, in, other_vport,
1048                                  1);
1049                         MLX5_SET(query_hca_vport_context_in, in, vport_number,
1050                                  vport_num);
1051                 } else {
1052                         return -EPERM;
1053                 }
1054         }
1055
1056         if (MLX5_CAP_GEN(mdev, num_ports) == 2)
1057                 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
1058
1059         return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
1060 }
1061
1062 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1063                                            u64 *system_image_guid)
1064 {
1065         u32 *out;
1066         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1067         int err;
1068
1069         out = mlx5_vzalloc(outlen);
1070         if (!out)
1071                 return -ENOMEM;
1072
1073         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1074         if (err)
1075                 goto out;
1076
1077         *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1078                                         hca_vport_context.system_image_guid);
1079
1080 out:
1081         kvfree(out);
1082         return err;
1083 }
1084 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1085
1086 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1087 {
1088         u32 *out;
1089         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1090         int err;
1091
1092         out = mlx5_vzalloc(outlen);
1093         if (!out)
1094                 return -ENOMEM;
1095
1096         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1097         if (err)
1098                 goto out;
1099
1100         *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1101                                 hca_vport_context.node_guid);
1102
1103 out:
1104         kvfree(out);
1105         return err;
1106 }
1107 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1108
1109 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1110                                           u64 *port_guid)
1111 {
1112         u32 *out;
1113         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1114         int err;
1115
1116         out = mlx5_vzalloc(outlen);
1117         if (!out)
1118                 return -ENOMEM;
1119
1120         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1121         if (err)
1122                 goto out;
1123
1124         *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1125                                 hca_vport_context.port_guid);
1126
1127 out:
1128         kvfree(out);
1129         return err;
1130 }
1131
1132 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1133                              u16 vport_num, u16 gid_index, union ib_gid *gid)
1134 {
1135         int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1136         int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1137         int is_group_manager;
1138         void *out = NULL;
1139         void *in = NULL;
1140         union ib_gid *tmp;
1141         int tbsz;
1142         int nout;
1143         int err;
1144
1145         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1146         tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
1147
1148         if (gid_index > tbsz && gid_index != 0xffff)
1149                 return -EINVAL;
1150
1151         if (gid_index == 0xffff)
1152                 nout = tbsz;
1153         else
1154                 nout = 1;
1155
1156         out_sz += nout * sizeof(*gid);
1157
1158         in = mlx5_vzalloc(in_sz);
1159         out = mlx5_vzalloc(out_sz);
1160         if (!in || !out) {
1161                 err = -ENOMEM;
1162                 goto out;
1163         }
1164
1165         MLX5_SET(query_hca_vport_gid_in, in, opcode,
1166                  MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1167         if (vport_num) {
1168                 if (is_group_manager) {
1169                         MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1170                                  vport_num);
1171                         MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1172                 } else {
1173                         err = -EPERM;
1174                         goto out;
1175                 }
1176         }
1177
1178         MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1179
1180         if (MLX5_CAP_GEN(dev, num_ports) == 2)
1181                 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1182
1183         err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1184         if (err)
1185                 goto out;
1186
1187         tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1188         gid->global.subnet_prefix = tmp->global.subnet_prefix;
1189         gid->global.interface_id = tmp->global.interface_id;
1190
1191 out:
1192         kvfree(in);
1193         kvfree(out);
1194         return err;
1195 }
1196 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
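/*
 * A gid_index of 0xffff queries the whole GID table in a single command
 * (the output buffer is sized for tbsz entries), but only the first
 * returned GID is copied back into *gid here.
 */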
1197
1198 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1199                               u8 port_num, u16 vf_num, u16 pkey_index,
1200                               u16 *pkey)
1201 {
1202         int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1203         int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1204         int is_group_manager;
1205         void *out = NULL;
1206         void *in = NULL;
1207         void *pkarr;
1208         int nout;
1209         int tbsz;
1210         int err;
1211         int i;
1212
1213         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1214
1215         tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1216         if (pkey_index > tbsz && pkey_index != 0xffff)
1217                 return -EINVAL;
1218
1219         if (pkey_index == 0xffff)
1220                 nout = tbsz;
1221         else
1222                 nout = 1;
1223
1224         out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1225
1226         in = kzalloc(in_sz, GFP_KERNEL);
1227         out = kzalloc(out_sz, GFP_KERNEL);
1228
1229         MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1230                  MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1231         if (other_vport) {
1232                 if (is_group_manager) {
1233                         MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1234                                  vf_num);
1235                         MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1236                 } else {
1237                         err = -EPERM;
1238                         goto out;
1239                 }
1240         }
1241         MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1242
1243         if (MLX5_CAP_GEN(dev, num_ports) == 2)
1244                 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1245
1246         err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1247         if (err)
1248                 goto out;
1249
1250         pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1251         for (i = 0; i < nout; i++, pkey++,
1252              pkarr += MLX5_ST_SZ_BYTES(pkey))
1253                 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1254
1255 out:
1256         kfree(in);
1257         kfree(out);
1258         return err;
1259 }
1260 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1261
1262 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1263                                          int *min_header)
1264 {
1265         u32 *out;
1266         u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1267         int err;
1268
1269         out = mlx5_vzalloc(outlen);
1270         if (!out)
1271                 return -ENOMEM;
1272
1273         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1274         if (err)
1275                 goto out;
1276
1277         *min_header = MLX5_GET(query_hca_vport_context_out, out,
1278                                hca_vport_context.min_wqe_inline_mode);
1279
1280 out:
1281         kvfree(out);
1282         return err;
1283 }
1284
1285 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1286                                              u16 vport, void *in, int inlen)
1287 {
1288         u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
1289         int err;
1290
1291         MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1292         if (vport)
1293                 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1294
1295         MLX5_SET(modify_esw_vport_context_in, in, opcode,
1296                  MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1297
1298         err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
1299         if (err)
1300                 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
1301
1302         return err;
1303 }
1304
1305 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1306                                 u8 insert_mode, u8 strip_mode,
1307                                 u16 vlan, u8 cfi, u8 pcp)
1308 {
1309         u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1310
1311         memset(in, 0, sizeof(in));
1312
1313         if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1314                 MLX5_SET(modify_esw_vport_context_in, in,
1315                          esw_vport_context.cvlan_cfi, cfi);
1316                 MLX5_SET(modify_esw_vport_context_in, in,
1317                          esw_vport_context.cvlan_pcp, pcp);
1318                 MLX5_SET(modify_esw_vport_context_in, in,
1319                          esw_vport_context.cvlan_id, vlan);
1320         }
1321
1322         MLX5_SET(modify_esw_vport_context_in, in,
1323                  esw_vport_context.vport_cvlan_insert, insert_mode);
1324
1325         MLX5_SET(modify_esw_vport_context_in, in,
1326                  esw_vport_context.vport_cvlan_strip, strip_mode);
1327
1328         MLX5_SET(modify_esw_vport_context_in, in, field_select,
1329                  MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1330                  MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1331
1332         return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1333 }
1334 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1335
1336 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1337 {
1338         u32 *out;
1339         u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1340         int err;
1341
1342         out = mlx5_vzalloc(outlen);
1343         if (!out)
1344                 return -ENOMEM;
1345
1346         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1347         if (err)
1348                 goto out;
1349
1350         *mtu = MLX5_GET(query_nic_vport_context_out, out,
1351                         nic_vport_context.mtu);
1352
1353 out:
1354         kvfree(out);
1355         return err;
1356 }
1357 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1358
1359 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1360 {
1361         u32 *in;
1362         u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1363         int err;
1364
1365         in = mlx5_vzalloc(inlen);
1366         if (!in)
1367                 return -ENOMEM;
1368
1369         MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1370         MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1371
1372         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1373
1374         kvfree(in);
1375         return err;
1376 }
1377 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
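/*
 * Illustrative MTU flow for the local NIC vport (a sketch, assuming the
 * caller wants to learn the value the device actually applied):
 *
 *      int eff_mtu;
 *
 *      mlx5_set_vport_mtu(mdev, want_mtu);
 *      mlx5_query_vport_mtu(mdev, &eff_mtu);
 */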
1378
1379 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1380                                            int *min_header)
1381 {
1382         u32 *out;
1383         u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1384         int err;
1385
1386         out = mlx5_vzalloc(outlen);
1387         if (!out)
1388                 return -ENOMEM;
1389
1390         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1391         if (err)
1392                 goto out;
1393
1394         *min_header = MLX5_GET(query_nic_vport_context_out, out,
1395                                nic_vport_context.min_wqe_inline_mode);
1396
1397 out:
1398         kvfree(out);
1399         return err;
1400 }
1401
1402 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1403                                   u8 vport, int min_header)
1404 {
1405         u32 *in;
1406         u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1407         int err;
1408
1409         in = mlx5_vzalloc(inlen);
1410         if (!in)
1411                 return -ENOMEM;
1412
1413         MLX5_SET(modify_nic_vport_context_in, in,
1414                  field_select.min_wqe_inline_mode, 1);
1415         MLX5_SET(modify_nic_vport_context_in, in,
1416                  nic_vport_context.min_wqe_inline_mode, min_header);
1417         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1418         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1419
1420         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1421
1422         kvfree(in);
1423         return err;
1424 }
1425 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1426
1427 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1428 {
1429         switch (MLX5_CAP_GEN(dev, port_type)) {
1430         case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1431                 return mlx5_query_hca_min_wqe_header(dev, min_header);
1432
1433         case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1434                 return mlx5_query_vport_min_wqe_header(dev, min_header);
1435
1436         default:
1437                 return -EINVAL;
1438         }
1439 }
1440 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1441
1442 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1443                                  u16 vport,
1444                                  int *promisc_uc,
1445                                  int *promisc_mc,
1446                                  int *promisc_all)
1447 {
1448         u32 *out;
1449         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1450         int err;
1451
1452         out = kzalloc(outlen, GFP_KERNEL);
1453         if (!out)
1454                 return -ENOMEM;
1455
1456         err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1457         if (err)
1458                 goto out;
1459
1460         *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1461                                nic_vport_context.promisc_uc);
1462         *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1463                                nic_vport_context.promisc_mc);
1464         *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1465                                 nic_vport_context.promisc_all);
1466
1467 out:
1468         kfree(out);
1469         return err;
1470 }
1471 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1472
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
                                  int promisc_uc,
                                  int promisc_mc,
                                  int promisc_all)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_err(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_uc, promisc_uc);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_mc, promisc_mc);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_all, promisc_all);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);
        kvfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

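/*
 * Enable or disable local loopback on vport 0 for the selected traffic
 * type (unicast or multicast).  A non-zero value sets the corresponding
 * disable_*_local_lb bit, i.e. it turns local loopback off.
 */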
int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
                                   enum mlx5_local_lb_selection selection,
                                   u8 value)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);

        if (selection == MLX5_LOCAL_MC_LB) {
                MLX5_SET(modify_nic_vport_context_in, in,
                         field_select.disable_mc_local_lb, 1);
                MLX5_SET(modify_nic_vport_context_in, in,
                         nic_vport_context.disable_mc_local_lb,
                         value);
        } else {
                MLX5_SET(modify_nic_vport_context_in, in,
                         field_select.disable_uc_local_lb, 1);
                MLX5_SET(modify_nic_vport_context_in, in,
                         nic_vport_context.disable_uc_local_lb,
                         value);
        }

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);

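/*
 * Read back the local loopback disable bit for the selected traffic
 * type (unicast or multicast) from the NIC vport context of vport 0.
 */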
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
                                  enum mlx5_local_lb_selection selection,
                                  u8 *value)
{
        void *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
        if (err)
                goto done;

        if (selection == MLX5_LOCAL_MC_LB)
                *value = MLX5_GET(query_nic_vport_context_out, out,
                                  nic_vport_context.disable_mc_local_lb);
        else
                *value = MLX5_GET(query_nic_vport_context_out, out,
                                  nic_vport_context.disable_uc_local_lb);

done:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

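/*
 * Execute QUERY_VPORT_COUNTER for the given port/vport and return the
 * raw command output in "out".  Querying a vport other than the
 * caller's own requires the vport_group_manager capability; otherwise
 * -EPERM is returned.  port_num is only set on dual-port devices.
 */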
int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
                             u8 port_num, u16 vport_num,
                             void *out, int out_size)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
        int is_group_manager;
        void *in;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        in = mlx5_vzalloc(in_sz);
        if (!in)
                return -ENOMEM;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        if (vport_num) {
                if (is_group_manager) {
                        MLX5_SET(query_vport_counter_in, in, other_vport, 1);
                        MLX5_SET(query_vport_counter_in, in, vport_number,
                                 vport_num);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }
        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_vport_counter_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);

ex:
        kvfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);

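/*
 * Convenience wrapper around mlx5_query_vport_counter() for the local
 * vport: run the query and unpack the error, IB unicast/multicast and
 * Ethernet unicast/multicast/broadcast counters into
 * struct mlx5_vport_counters.
 */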
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
                            struct mlx5_vport_counters *vc)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        void *out;
        int err;

        out = mlx5_vzalloc(out_sz);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
        if (err)
                goto ex;

        vc->received_errors.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_errors.packets);
        vc->received_errors.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_errors.octets);
        vc->transmit_errors.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmit_errors.packets);
        vc->transmit_errors.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmit_errors.octets);
        vc->received_ib_unicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_ib_unicast.packets);
        vc->received_ib_unicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_ib_unicast.octets);
        vc->transmitted_ib_unicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_ib_unicast.packets);
        vc->transmitted_ib_unicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_ib_unicast.octets);
        vc->received_ib_multicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_ib_multicast.packets);
        vc->received_ib_multicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_ib_multicast.octets);
        vc->transmitted_ib_multicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_ib_multicast.packets);
        vc->transmitted_ib_multicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_ib_multicast.octets);
        vc->received_eth_broadcast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_eth_broadcast.packets);
        vc->received_eth_broadcast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_eth_broadcast.octets);
        vc->transmitted_eth_broadcast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_eth_broadcast.packets);
        vc->transmitted_eth_broadcast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_eth_broadcast.octets);
        vc->received_eth_unicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_eth_unicast.octets);
        vc->received_eth_unicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_eth_unicast.packets);
        vc->transmitted_eth_unicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_eth_unicast.octets);
        vc->transmitted_eth_unicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_eth_unicast.packets);
        vc->received_eth_multicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_eth_multicast.octets);
        vc->received_eth_multicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, received_eth_multicast.packets);
        vc->transmitted_eth_multicast.octets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_eth_multicast.octets);
        vc->transmitted_eth_multicast.packets =
                MLX5_GET64(query_vport_counter_out,
                           out, transmitted_eth_multicast.packets);

ex:
        kvfree(out);
        return err;
}

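/*
 * Return the system image GUID, taking the HCA vport path on IB ports
 * and the NIC vport path on Ethernet ports.
 */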
int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
                                       u64 *sys_image_guid)
{
        switch (MLX5_CAP_GEN(dev, port_type)) {
        case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
                return mlx5_query_hca_vport_system_image_guid(dev,
                                                              sys_image_guid);

        case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
                return mlx5_query_nic_vport_system_image_guid(dev,
                                                              sys_image_guid);

        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);

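/*
 * Return the node GUID via the port-type specific query (HCA vport for
 * IB, NIC vport for Ethernet).
 */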
int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
{
        switch (MLX5_CAP_GEN(dev, port_type)) {
        case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
                return mlx5_query_hca_vport_node_guid(dev, node_guid);

        case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
                return mlx5_query_nic_vport_node_guid(dev, node_guid);

        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);

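/*
 * Return the port GUID via the port-type specific query (HCA vport for
 * IB, NIC vport for Ethernet).
 */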
int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
{
        switch (MLX5_CAP_GEN(dev, port_type)) {
        case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
                return mlx5_query_hca_vport_port_guid(dev, port_guid);

        case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
                return mlx5_query_nic_vport_port_guid(dev, port_guid);

        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);

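/*
 * Query the HCA vport context and return its vport_state field.
 */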
int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
        if (err)
                goto out;

        *vport_state = MLX5_GET(query_hca_vport_context_out, out,
                                hca_vport_context.vport_state);

out:
        kvfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);

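/*
 * Read the InfiniBand port counters group of the PPCNT register for the
 * given local port.  "sz" is used both as the size of the scratch input
 * mailbox and of the caller supplied output buffer.
 */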
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
                             u8 port_num, void *out, size_t sz)
{
        u32 *in;
        int err;

        in = mlx5_vzalloc(sz);
        if (!in)
                return -ENOMEM;

        MLX5_SET(ppcnt_reg, in, local_port, port_num);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
        err = mlx5_core_access_reg(dev, in, sz, out,
                                   sz, MLX5_REG_PPCNT, 0, 0);

        kvfree(in);
        return err;
}