1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
32
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
34                                          int inlen);
35
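/*
 * Common helper for the QUERY_VPORT_STATE command: build the inbox for
 * the given opmod and vport (setting other_vport when a non-zero vport
 * is queried) and leave the raw outbox in "out".
 */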
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37                                    u16 vport, u32 *out, int outlen)
38 {
39         int err;
40         u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
41
42         memset(in, 0, sizeof(in));
43
44         MLX5_SET(query_vport_state_in, in, opcode,
45                  MLX5_CMD_OP_QUERY_VPORT_STATE);
46         MLX5_SET(query_vport_state_in, in, op_mod, opmod);
47         MLX5_SET(query_vport_state_in, in, vport_number, vport);
48         if (vport)
49                 MLX5_SET(query_vport_state_in, in, other_vport, 1);
50
51         err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
52         if (err)
53                 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
54
55         return err;
56 }
57
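/*
 * Return the operational state of a vport.  Command failures are only
 * warned about by the helper above; in that case the state field of the
 * zero-initialized outbox (0) is returned.
 */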
58 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
59 {
60         u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
61
62         _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
63
64         return MLX5_GET(query_vport_state_out, out, state);
65 }
66 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
67
68 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
69 {
70         u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
71
72         _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
73
74         return MLX5_GET(query_vport_state_out, out, admin_state);
75 }
76 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
77
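/*
 * Set the administrative (up/down) state of a vport through
 * MODIFY_VPORT_STATE; a non-zero vport number targets another vport via
 * the other_vport bit.
 */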
78 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
79                                   u16 vport, u8 state)
80 {
81         u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
82         u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
83         int err;
84
85         memset(in, 0, sizeof(in));
86
87         MLX5_SET(modify_vport_state_in, in, opcode,
88                  MLX5_CMD_OP_MODIFY_VPORT_STATE);
89         MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
90         MLX5_SET(modify_vport_state_in, in, vport_number, vport);
91
92         if (vport)
93                 MLX5_SET(modify_vport_state_in, in, other_vport, 1);
94
95         MLX5_SET(modify_vport_state_in, in, admin_state, state);
96
97         err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
98                                          sizeof(out));
99         if (err)
100                 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
101
102         return err;
103 }
104 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
105
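/*
 * Read the NIC vport context of "vport" into the caller-supplied outbox
 * using QUERY_NIC_VPORT_CONTEXT.
 */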
106 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
107                                         u32 *out, int outlen)
108 {
109         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
110
111         memset(in, 0, sizeof(in));
112
113         MLX5_SET(query_nic_vport_context_in, in, opcode,
114                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
115
116         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
117         if (vport)
118                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
119
120         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
121 }
122
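/*
 * Number of queue counter sets a given interface client may allocate:
 * the Ethernet client gets a fixed MLX5_QCOUNTER_SETS_NETDEV share and
 * the IB client gets the remainder of max_qp_cnt.
 */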
123 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
124                                               int client_id)
125 {
126         switch (client_id) {
127         case MLX5_INTERFACE_PROTOCOL_IB:
128                 return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
129                         MLX5_QCOUNTER_SETS_NETDEV);
130         case MLX5_INTERFACE_PROTOCOL_ETH:
131                 return MLX5_QCOUNTER_SETS_NETDEV;
132         default:
133                 mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
134                 return 0;
135         }
136 }
137
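/*
 * Allocate a queue counter set for the given client and return its id
 * in *counter_set_id.  Per-client usage is tracked in
 * mdev->num_q_counter_allocated[] and released again by
 * mlx5_vport_dealloc_q_counter().
 */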
138 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
139                                int client_id, u16 *counter_set_id)
140 {
141         u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
142         u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
143         int err;
144
145         if (mdev->num_q_counter_allocated[client_id] >
146             mlx5_vport_max_q_counter_allocator(mdev, client_id))
147                 return -EINVAL;
148
149         memset(in, 0, sizeof(in));
150         memset(out, 0, sizeof(out));
151
152         MLX5_SET(alloc_q_counter_in, in, opcode,
153                  MLX5_CMD_OP_ALLOC_Q_COUNTER);
154
155         err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
156                                          out, sizeof(out));
157
158         if (!err) {
159                 *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
160                                            counter_set_id);
161                 mdev->num_q_counter_allocated[client_id]++;
162         }
163
164         return err;
165 }
166
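/*
 * Free a queue counter set previously obtained from
 * mlx5_vport_alloc_q_counter() and update the per-client bookkeeping.
 */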
167 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
168                                  int client_id, u16 counter_set_id)
169 {
170         u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
171         u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
172         int err;
173
174         if (mdev->num_q_counter_allocated[client_id] <= 0)
175                 return -EINVAL;
176
177         memset(in, 0, sizeof(in));
178         memset(out, 0, sizeof(out));
179
180         MLX5_SET(dealloc_q_counter_in, in, opcode,
181                  MLX5_CMD_OP_DEALLOC_Q_COUNTER);
182         MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
183                  counter_set_id);
184
185         err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
186                                          out, sizeof(out));
187
188         mdev->num_q_counter_allocated[client_id]--;
189
190         return err;
191 }
192
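/*
 * Read a queue counter set into "out"; a non-zero "reset" also clears
 * the counters as part of the same command.
 */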
193 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
194                                       u16 counter_set_id,
195                                       int reset,
196                                       void *out,
197                                       int out_size)
198 {
199         u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
200
201         memset(in, 0, sizeof(in));
202
203         MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
204         MLX5_SET(query_q_counter_in, in, clear, reset);
205         MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
206
207         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
208                                           out, out_size);
209 }
210
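/*
 * Convenience wrapper that returns only the out_of_buffer field of a
 * queue counter set (receive drops due to lack of buffers).
 */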
211 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
212                                       u16 counter_set_id,
213                                       u32 *out_of_rx_buffer)
214 {
215         u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
216         int err;
217
218         memset(out, 0, sizeof(out));
219
220         err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
221                                          sizeof(out));
222
223         if (err)
224                 return err;
225
226         *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
227                                      out_of_buffer);
228         return err;
229 }
230
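/*
 * Return the permanent MAC address of "vport".  The address occupies
 * the upper six bytes of the 8-byte permanent_address field, hence the
 * +2 offset when copying it out.
 */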
231 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
232                                      u16 vport, u8 *addr)
233 {
234         u32 *out;
235         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
236         u8 *out_addr;
237         int err;
238
239         out = mlx5_vzalloc(outlen);
240         if (!out)
241                 return -ENOMEM;
242
243         out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
244                                 nic_vport_context.permanent_address);
245
246         err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
247         if (err)
248                 goto out;
249
250         ether_addr_copy(addr, &out_addr[2]);
251
252 out:
253         kvfree(out);
254         return err;
255 }
256 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
257
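/*
 * Program a new permanent MAC address for "vport" via
 * MODIFY_NIC_VPORT_CONTEXT.
 */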
258 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
259                                       u16 vport, u8 *addr)
260 {
261         void *in;
262         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
263         int err;
264         void *nic_vport_ctx;
265         u8 *perm_mac;
266
267         in = mlx5_vzalloc(inlen);
268         if (!in) {
269                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
270                 return -ENOMEM;
271         }
272
273         MLX5_SET(modify_nic_vport_context_in, in,
274                  field_select.permanent_address, 1);
275         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
276
277         if (vport)
278                 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
279
280         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
281                                      in, nic_vport_context);
282         perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
283                                 permanent_address);
284
285         ether_addr_copy(&perm_mac[2], addr);
286
287         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
288
289         kvfree(in);
290
291         return err;
292 }
293 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
294
295 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
296                                            u64 *system_image_guid)
297 {
298         u32 *out;
299         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
300         int err;
301
302         out = mlx5_vzalloc(outlen);
303         if (!out)
304                 return -ENOMEM;
305
306         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
307         if (err)
308                 goto out;
309
310         *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
311                                         nic_vport_context.system_image_guid);
312 out:
313         kvfree(out);
314         return err;
315 }
316 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
317
318 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
319 {
320         u32 *out;
321         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
322         int err;
323
324         out = mlx5_vzalloc(outlen);
325         if (!out)
326                 return -ENOMEM;
327
328         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
329         if (err)
330                 goto out;
331
332         *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
333                                 nic_vport_context.node_guid);
334
335 out:
336         kvfree(out);
337         return err;
338 }
339 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
340
341 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
342                                           u64 *port_guid)
343 {
344         u32 *out;
345         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
346         int err;
347
348         out = mlx5_vzalloc(outlen);
349         if (!out)
350                 return -ENOMEM;
351
352         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
353         if (err)
354                 goto out;
355
356         *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
357                                 nic_vport_context.port_guid);
358
359 out:
360         kvfree(out);
361         return err;
362 }
363
364 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
365                                         u16 *qkey_viol_cntr)
366 {
367         u32 *out;
368         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
369         int err;
370
371         out = mlx5_vzalloc(outlen);
372         if (!out)
373                 return -ENOMEM;
374
375         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
376         if (err)
377                 goto out;
378
379         *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
380                                 nic_vport_context.qkey_violation_counter);
381
382 out:
383         kvfree(out);
384         return err;
385 }
386 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
387
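/*
 * Common tail for all NIC vport context updates: stamp the opcode into
 * the caller-built inbox and execute MODIFY_NIC_VPORT_CONTEXT.
 */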
388 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
389                                          int inlen)
390 {
391         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
392
393         MLX5_SET(modify_nic_vport_context_in, in, opcode,
394                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
395
396         memset(out, 0, sizeof(out));
397         return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
398 }
399
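/*
 * Toggle the roce_en bit of the NIC vport context; used by the
 * mlx5_nic_vport_{enable,disable}_roce() wrappers below.
 */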
400 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
401                                               int enable_disable)
402 {
403         void *in;
404         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
405         int err;
406
407         in = mlx5_vzalloc(inlen);
408         if (!in) {
409                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
410                 return -ENOMEM;
411         }
412
413         MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
414         MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
415                  enable_disable);
416
417         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
418
419         kvfree(in);
420
421         return err;
422 }
423
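/*
 * Replace the allowed unicast address list of "vport" with a single
 * entry, effectively setting the MAC address the vport currently
 * accepts.
 */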
424 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
425                                    bool other_vport, u8 *addr)
426 {
427         void *in;
428         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
429                   + MLX5_ST_SZ_BYTES(mac_address_layout);
430         u8  *mac_layout;
431         u8  *mac_ptr;
432         int err;
433
434         in = mlx5_vzalloc(inlen);
435         if (!in) {
436                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
437                 return -ENOMEM;
438         }
439
440         MLX5_SET(modify_nic_vport_context_in, in,
441                  opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
442         MLX5_SET(modify_nic_vport_context_in, in,
443                  vport_number, vport);
444         MLX5_SET(modify_nic_vport_context_in, in,
445                  other_vport, other_vport);
446         MLX5_SET(modify_nic_vport_context_in, in,
447                  field_select.addresses_list, 1);
448         MLX5_SET(modify_nic_vport_context_in, in,
449                  nic_vport_context.allowed_list_type,
450                  MLX5_NIC_VPORT_LIST_TYPE_UC);
451         MLX5_SET(modify_nic_vport_context_in, in,
452                  nic_vport_context.allowed_list_size, 1);
453
454         mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
455                 nic_vport_context.current_uc_mac_address);
456         mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
457                 mac_addr_47_32);
458         ether_addr_copy(mac_ptr, addr);
459
460         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
461
462         kvfree(in);
463
464         return err;
465 }
466 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
467
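/*
 * Set the node GUID of another vport (e.g. a VF).  Requires the vport
 * group manager capability and firmware support for
 * nic_vport_node_guid_modify.
 */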
468 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
469                                     u32 vport, u64 node_guid)
470 {
471         void *in;
472         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
473         int err;
474         void *nic_vport_context;
475
476         if (!vport)
477                 return -EINVAL;
478         if (!MLX5_CAP_GEN(mdev, vport_group_manager))
479                 return -EPERM;
480         if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
481                 return -ENOTSUPP;
482
483         in = mlx5_vzalloc(inlen);
484         if (!in) {
485                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
486                 return -ENOMEM;
487         }
488
489         MLX5_SET(modify_nic_vport_context_in, in,
490                  field_select.node_guid, 1);
491         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
492
493         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
494
495         nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
496                                          in, nic_vport_context);
497         MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
498
499         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
500
501         kvfree(in);
502
503         return err;
504 }
505 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
506
507 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
508                                     u32 vport, u64 port_guid)
509 {
510         void *in;
511         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
512         int err;
513         void *nic_vport_context;
514
515         if (!vport)
516                 return -EINVAL;
517         if (!MLX5_CAP_GEN(mdev, vport_group_manager))
518                 return -EPERM;
519         if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
520                 return -ENOTSUPP;
521
522         in = mlx5_vzalloc(inlen);
523         if (!in) {
524                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
525                 return -ENOMEM;
526         }
527
528         MLX5_SET(modify_nic_vport_context_in, in,
529                  field_select.port_guid, 1);
530         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
531
532         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
533
534         nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
535                                          in, nic_vport_context);
536         MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
537
538         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
539
540         kvfree(in);
541
542         return err;
543 }
544 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
545
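/*
 * Install the allowed VLAN list of "vport".  The entries are written
 * into the trailing allowed-address area of the vport context and the
 * list length is bounded by log_max_vlan_list.
 */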
546 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
547                                  u16 *vlan_list, int list_len)
548 {
549         void *in, *ctx;
550         int i, err;
551         int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
552                 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
553
554         int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
555
556         if (list_len > max_list_size) {
557                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
558                                list_len, max_list_size);
559                 return -ENOSPC;
560         }
561
562         in = mlx5_vzalloc(inlen);
563         if (!in) {
564                 mlx5_core_warn(dev, "failed to allocate inbox\n");
565                 return -ENOMEM;
566         }
567
568         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
569         if (vport)
570                 MLX5_SET(modify_nic_vport_context_in, in,
571                          other_vport, 1);
572         MLX5_SET(modify_nic_vport_context_in, in,
573                  field_select.addresses_list, 1);
574
575         ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
576
577         MLX5_SET(nic_vport_context, ctx, allowed_list_type,
578                  MLX5_NIC_VPORT_LIST_TYPE_VLAN);
579         MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
580
581         for (i = 0; i < list_len; i++) {
582                 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
583                                          current_uc_mac_address[i]);
584                 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
585         }
586
587         err = mlx5_modify_nic_vport_context(dev, in, inlen);
588
589         kvfree(in);
590         return err;
591 }
592 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
593
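/*
 * Install the allowed multicast MAC list of "vport", bounded by
 * log_max_current_mc_list.
 */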
594 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
595                                u64 *addr_list, size_t addr_list_len)
596 {
597         void *in, *ctx;
598         int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
599                   + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
600         int err;
601         size_t i;
602         int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
603
604         if ((int)addr_list_len > max_list_sz) {
605                 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
606                                (int)addr_list_len, max_list_sz);
607                 return -ENOSPC;
608         }
609
610         in = mlx5_vzalloc(inlen);
611         if (!in) {
612                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
613                 return -ENOMEM;
614         }
615
616         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
617         if (vport)
618                 MLX5_SET(modify_nic_vport_context_in, in,
619                          other_vport, 1);
620         MLX5_SET(modify_nic_vport_context_in, in,
621                  field_select.addresses_list, 1);
622
623         ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
624
625         MLX5_SET(nic_vport_context, ctx, allowed_list_type,
626                  MLX5_NIC_VPORT_LIST_TYPE_MC);
627         MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
628
629         for (i = 0; i < addr_list_len; i++) {
630                 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
631                                                   current_uc_mac_address[i]);
632                 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
633                                                  mac_addr_47_32);
634                 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
635         }
636
637         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
638
639         kvfree(in);
640
641         return err;
642 }
643 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
644
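/*
 * Set the unicast, multicast and "all" promiscuous bits of "vport" in a
 * single MODIFY_NIC_VPORT_CONTEXT command.
 */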
645 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
646                                bool promisc_mc, bool promisc_uc,
647                                bool promisc_all)
648 {
649         u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
650         u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
651                                nic_vport_context);
652
653         memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
654
655         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
656         if (vport)
657                 MLX5_SET(modify_nic_vport_context_in, in,
658                          other_vport, 1);
659         MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
660         if (promisc_mc)
661                 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
662         if (promisc_uc)
663                 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
664         if (promisc_all)
665                 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
666
667         return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
668 }
669 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
670
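/*
 * Read back the allowed UC or MC address list of "vport".  On entry
 * *list_size is the caller's capacity (clamped to the device maximum);
 * on return it is the number of entries reported by firmware.
 */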
671 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
672                                   u16 vport,
673                                   enum mlx5_list_type list_type,
674                                   u8 addr_list[][ETH_ALEN],
675                                   int *list_size)
676 {
677         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
678         void *nic_vport_ctx;
679         int max_list_size;
680         int req_list_size;
681         int out_sz;
682         void *out;
683         int err;
684         int i;
685
686         req_list_size = *list_size;
687
688         max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
689                         1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
690                         1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
691
692         if (req_list_size > max_list_size) {
693                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
694                                req_list_size, max_list_size);
695                 req_list_size = max_list_size;
696         }
697
698         out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
699                  req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
700
701         memset(in, 0, sizeof(in));
702         out = kzalloc(out_sz, GFP_KERNEL);
703         if (!out)
704                 return -ENOMEM;
705
706         MLX5_SET(query_nic_vport_context_in, in, opcode,
707                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
708         MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
709         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
710
711         if (vport)
712                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
713
714         err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
715         if (err)
716                 goto out;
717
718         nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
719                                      nic_vport_context);
720         req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
721                                  allowed_list_size);
722
723         *list_size = req_list_size;
724         for (i = 0; i < req_list_size; i++) {
725                 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
726                                         nic_vport_ctx,
727                                         current_uc_mac_address[i]) + 2;
728                 ether_addr_copy(addr_list[i], mac_addr);
729         }
730 out:
731         kfree(out);
732         return err;
733 }
734 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
735
736 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
737                                    enum mlx5_list_type list_type,
738                                    u8 addr_list[][ETH_ALEN],
739                                    int list_size)
740 {
741         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
742         void *nic_vport_ctx;
743         int max_list_size;
744         int in_sz;
745         void *in;
746         int err;
747         int i;
748
749         max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
750                  1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
751                  1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
752
753         if (list_size > max_list_size)
754                 return -ENOSPC;
755
756         in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
757                 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
758
759         memset(out, 0, sizeof(out));
760         in = kzalloc(in_sz, GFP_KERNEL);
761         if (!in)
762                 return -ENOMEM;
763
764         MLX5_SET(modify_nic_vport_context_in, in, opcode,
765                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
766         MLX5_SET(modify_nic_vport_context_in, in,
767                  field_select.addresses_list, 1);
768
769         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
770                                      nic_vport_context);
771
772         MLX5_SET(nic_vport_context, nic_vport_ctx,
773                  allowed_list_type, list_type);
774         MLX5_SET(nic_vport_context, nic_vport_ctx,
775                  allowed_list_size, list_size);
776
777         for (i = 0; i < list_size; i++) {
778                 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
779                                             nic_vport_ctx,
780                                             current_uc_mac_address[i]) + 2;
781                 ether_addr_copy(curr_mac, addr_list[i]);
782         }
783
784         err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
785         kfree(in);
786         return err;
787 }
788 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
789
790 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
791                                u16 vport,
792                                u16 vlans[],
793                                int *size)
794 {
795         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
796         void *nic_vport_ctx;
797         int req_list_size;
798         int max_list_size;
799         int out_sz;
800         void *out;
801         int err;
802         int i;
803
804         req_list_size = *size;
805         max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
806         if (req_list_size > max_list_size) {
807                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
808                                req_list_size, max_list_size);
809                 req_list_size = max_list_size;
810         }
811
812         out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
813                  req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
814
815         memset(in, 0, sizeof(in));
816         out = kzalloc(out_sz, GFP_KERNEL);
817         if (!out)
818                 return -ENOMEM;
819
820         MLX5_SET(query_nic_vport_context_in, in, opcode,
821                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
822         MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
823                  MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
824         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
825
826         if (vport)
827                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
828
829         err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
830         if (err)
831                 goto out;
832
833         nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
834                                      nic_vport_context);
835         req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
836                                  allowed_list_size);
837
838         *size = req_list_size;
839         for (i = 0; i < req_list_size; i++) {
840                 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
841                                                nic_vport_ctx,
842                                          current_uc_mac_address[i]);
843                 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
844         }
845 out:
846         kfree(out);
847         return err;
848 }
849 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
850
851 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
852                                 u16 vlans[],
853                                 int list_size)
854 {
855         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
856         void *nic_vport_ctx;
857         int max_list_size;
858         int in_sz;
859         void *in;
860         int err;
861         int i;
862
863         max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
864
865         if (list_size > max_list_size)
866                 return -ENOSPC;
867
868         in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
869                 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
870
871         memset(out, 0, sizeof(out));
872         in = kzalloc(in_sz, GFP_KERNEL);
873         if (!in)
874                 return -ENOMEM;
875
876         MLX5_SET(modify_nic_vport_context_in, in, opcode,
877                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
878         MLX5_SET(modify_nic_vport_context_in, in,
879                  field_select.addresses_list, 1);
880
881         nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
882                                      nic_vport_context);
883
884         MLX5_SET(nic_vport_context, nic_vport_ctx,
885                  allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
886         MLX5_SET(nic_vport_context, nic_vport_ctx,
887                  allowed_list_size, list_size);
888
889         for (i = 0; i < list_size; i++) {
890                 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
891                                                nic_vport_ctx,
892                                                current_uc_mac_address[i]);
893                 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
894         }
895
896         err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
897         kfree(in);
898         return err;
899 }
900 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
901
902 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
903 {
904         u32 *out;
905         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
906         int err;
907
908         out = kzalloc(outlen, GFP_KERNEL);
909         if (!out)
910                 return -ENOMEM;
911
912         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
913         if (err)
914                 goto out;
915
916         *enable = MLX5_GET(query_nic_vport_context_out, out,
917                                 nic_vport_context.roce_en);
918
919 out:
920         kfree(out);
921         return err;
922 }
923 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
924
925 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
926                                      u8 *addr)
927 {
928         void *in;
929         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
930         u8  *mac_ptr;
931         int err;
932
933         in = mlx5_vzalloc(inlen);
934         if (!in) {
935                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
936                 return -ENOMEM;
937         }
938
939         MLX5_SET(modify_nic_vport_context_in, in,
940                  opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
941         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
942         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
943         MLX5_SET(modify_nic_vport_context_in, in,
944                  field_select.permanent_address, 1);
945         mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
946                 nic_vport_context.permanent_address.mac_addr_47_32);
947         ether_addr_copy(mac_ptr, addr);
948
949         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
950
951         kvfree(in);
952
953         return err;
954 }
955 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
956
957 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
958 {
959         return mlx5_nic_vport_enable_disable_roce(mdev, 1);
960 }
961 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
962
963 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
964 {
965         return mlx5_nic_vport_enable_disable_roce(mdev, 0);
966 }
967 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
968
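/*
 * Query the HCA (IB) vport context.  Querying a non-zero vport requires
 * the vport group manager capability; on dual-port devices the port
 * number is passed through as well.
 */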
969 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
970                                  u8 port_num, u8 vport_num, u32 *out,
971                                  int outlen)
972 {
973         u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
974         int is_group_manager;
975
976         is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
977
978         memset(in, 0, sizeof(in));
979
980         MLX5_SET(query_hca_vport_context_in, in, opcode,
981                  MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
982
983         if (vport_num) {
984                 if (is_group_manager) {
985                         MLX5_SET(query_hca_vport_context_in, in, other_vport,
986                                  1);
987                         MLX5_SET(query_hca_vport_context_in, in, vport_number,
988                                  vport_num);
989                 } else {
990                         return -EPERM;
991                 }
992         }
993
994         if (MLX5_CAP_GEN(mdev, num_ports) == 2)
995                 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
996
997         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
998 }
999
1000 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1001                                            u64 *system_image_guid)
1002 {
1003         u32 *out;
1004         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1005         int err;
1006
1007         out = mlx5_vzalloc(outlen);
1008         if (!out)
1009                 return -ENOMEM;
1010
1011         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1012         if (err)
1013                 goto out;
1014
1015         *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1016                                         hca_vport_context.system_image_guid);
1017
1018 out:
1019         kvfree(out);
1020         return err;
1021 }
1022 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1023
1024 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1025 {
1026         u32 *out;
1027         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1028         int err;
1029
1030         out = mlx5_vzalloc(outlen);
1031         if (!out)
1032                 return -ENOMEM;
1033
1034         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1035         if (err)
1036                 goto out;
1037
1038         *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1039                                 hca_vport_context.node_guid);
1040
1041 out:
1042         kvfree(out);
1043         return err;
1044 }
1045 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1046
1047 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1048                                           u64 *port_guid)
1049 {
1050         u32 *out;
1051         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1052         int err;
1053
1054         out = mlx5_vzalloc(outlen);
1055         if (!out)
1056                 return -ENOMEM;
1057
1058         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1059         if (err)
1060                 goto out;
1061
1062         *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1063                                 hca_vport_context.port_guid);
1064
1065 out:
1066         kvfree(out);
1067         return err;
1068 }
1069
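/*
 * Query one GID (or, with gid_index 0xffff, the whole table) of an HCA
 * vport and copy the first returned entry into "gid".
 */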
1070 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
1071                              u16 vport_num, u16 gid_index, union ib_gid *gid)
1072 {
1073         int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
1074         int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
1075         int is_group_manager;
1076         void *out = NULL;
1077         void *in = NULL;
1078         union ib_gid *tmp;
1079         int tbsz;
1080         int nout;
1081         int err;
1082
1083         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1084         tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
1085
1086         if (gid_index > tbsz && gid_index != 0xffff)
1087                 return -EINVAL;
1088
1089         if (gid_index == 0xffff)
1090                 nout = tbsz;
1091         else
1092                 nout = 1;
1093
1094         out_sz += nout * sizeof(*gid);
1095
1096         in = mlx5_vzalloc(in_sz);
1097         out = mlx5_vzalloc(out_sz);
1098         if (!in || !out) {
1099                 err = -ENOMEM;
1100                 goto out;
1101         }
1102
1103         MLX5_SET(query_hca_vport_gid_in, in, opcode,
1104                  MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
1105         if (vport_num) {
1106                 if (is_group_manager) {
1107                         MLX5_SET(query_hca_vport_gid_in, in, vport_number,
1108                                  vport_num);
1109                         MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
1110                 } else {
1111                         err = -EPERM;
1112                         goto out;
1113                 }
1114         }
1115
1116         MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
1117
1118         if (MLX5_CAP_GEN(dev, num_ports) == 2)
1119                 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
1120
1121         err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1122         if (err)
1123                 goto out;
1124
1125         err = mlx5_cmd_status_to_err_v2(out);
1126         if (err)
1127                 goto out;
1128
1129         tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
1130         gid->global.subnet_prefix = tmp->global.subnet_prefix;
1131         gid->global.interface_id = tmp->global.interface_id;
1132
1133 out:
1134         kvfree(in);
1135         kvfree(out);
1136         return err;
1137 }
1138 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1139
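/*
 * Query one P_Key (or, with pkey_index 0xffff, the whole table) of an
 * HCA vport into the caller-supplied "pkey" array.
 */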
1140 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1141                               u8 port_num, u16 vf_num, u16 pkey_index,
1142                               u16 *pkey)
1143 {
1144         int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1145         int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1146         int is_group_manager;
1147         void *out = NULL;
1148         void *in = NULL;
1149         void *pkarr;
1150         int nout;
1151         int tbsz;
1152         int err;
1153         int i;
1154
1155         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1156
1157         tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1158         if (pkey_index > tbsz && pkey_index != 0xffff)
1159                 return -EINVAL;
1160
1161         if (pkey_index == 0xffff)
1162                 nout = tbsz;
1163         else
1164                 nout = 1;
1165
1166         out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1167
1168         in = kzalloc(in_sz, GFP_KERNEL);
1169         out = kzalloc(out_sz, GFP_KERNEL);
             if (!in || !out) {
                     err = -ENOMEM;
                     goto out;
             }
1170
1171         MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1172                  MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1173         if (other_vport) {
1174                 if (is_group_manager) {
1175                         MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1176                                  vf_num);
1177                         MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1178                 } else {
1179                         err = -EPERM;
1180                         goto out;
1181                 }
1182         }
1183         MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1184
1185         if (MLX5_CAP_GEN(dev, num_ports) == 2)
1186                 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1187
1188         err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1189         if (err)
1190                 goto out;
1191
1192         err = mlx5_cmd_status_to_err_v2(out);
1193         if (err)
1194                 goto out;
1195
1196         pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1197         for (i = 0; i < nout; i++, pkey++,
1198              pkarr += MLX5_ST_SZ_BYTES(pkey))
1199                 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1200
1201 out:
1202         kfree(in);
1203         kfree(out);
1204         return err;
1205 }
1206 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1207
1208 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1209                                          int *min_header)
1210 {
1211         u32 *out;
1212         u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1213         int err;
1214
1215         out = mlx5_vzalloc(outlen);
1216         if (!out)
1217                 return -ENOMEM;
1218
1219         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1220         if (err)
1221                 goto out;
1222
1223         *min_header = MLX5_GET(query_hca_vport_context_out, out,
1224                                hca_vport_context.min_wqe_inline_mode);
1225
1226 out:
1227         kvfree(out);
1228         return err;
1229 }
1230
1231 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1232                                              u16 vport, void *in, int inlen)
1233 {
1234         u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
1235         int err;
1236
1237         memset(out, 0, sizeof(out));
1238
1239         MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1240         if (vport)
1241                 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1242
1243         MLX5_SET(modify_esw_vport_context_in, in, opcode,
1244                  MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1245
1246         err = mlx5_cmd_exec_check_status(mdev, in, inlen,
1247                                          out, sizeof(out));
1248         if (err)
1249                 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
1250
1251         return err;
1252 }
1253
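/*
 * Configure e-switch C-VLAN stripping and insertion for "vport"; the
 * vlan/cfi/pcp values are only filled in when insertion is enabled.
 */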
1254 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1255                                 u8 insert_mode, u8 strip_mode,
1256                                 u16 vlan, u8 cfi, u8 pcp)
1257 {
1258         u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1259
1260         memset(in, 0, sizeof(in));
1261
1262         if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1263                 MLX5_SET(modify_esw_vport_context_in, in,
1264                          esw_vport_context.cvlan_cfi, cfi);
1265                 MLX5_SET(modify_esw_vport_context_in, in,
1266                          esw_vport_context.cvlan_pcp, pcp);
1267                 MLX5_SET(modify_esw_vport_context_in, in,
1268                          esw_vport_context.cvlan_id, vlan);
1269         }
1270
1271         MLX5_SET(modify_esw_vport_context_in, in,
1272                  esw_vport_context.vport_cvlan_insert, insert_mode);
1273
1274         MLX5_SET(modify_esw_vport_context_in, in,
1275                  esw_vport_context.vport_cvlan_strip, strip_mode);
1276
1277         MLX5_SET(modify_esw_vport_context_in, in, field_select,
1278                  MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1279                  MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1280
1281         return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1282 }
1283 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1284
1285 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1286 {
1287         u32 *out;
1288         u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1289         int err;
1290
1291         out = mlx5_vzalloc(outlen);
1292         if (!out)
1293                 return -ENOMEM;
1294
1295         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1296         if (err)
1297                 goto out;
1298
1299         *mtu = MLX5_GET(query_nic_vport_context_out, out,
1300                         nic_vport_context.mtu);
1301
1302 out:
1303         kvfree(out);
1304         return err;
1305 }
1306 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1307
1308 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1309 {
1310         u32 *in;
1311         u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1312         int err;
1313
1314         in = mlx5_vzalloc(inlen);
1315         if (!in)
1316                 return -ENOMEM;
1317
1318         MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1319         MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1320
1321         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1322
1323         kvfree(in);
1324         return err;
1325 }
1326 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1327
1328 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1329                                            int *min_header)
1330 {
1331         u32 *out;
1332         u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1333         int err;
1334
1335         out = mlx5_vzalloc(outlen);
1336         if (!out)
1337                 return -ENOMEM;
1338
1339         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1340         if (err)
1341                 goto out;
1342
1343         *min_header = MLX5_GET(query_nic_vport_context_out, out,
1344                                nic_vport_context.min_wqe_inline_mode);
1345
1346 out:
1347         kvfree(out);
1348         return err;
1349 }
1350
1351 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1352                                   u8 vport, int min_header)
1353 {
1354         u32 *in;
1355         u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1356         int err;
1357
1358         in = mlx5_vzalloc(inlen);
1359         if (!in)
1360                 return -ENOMEM;
1361
1362         MLX5_SET(modify_nic_vport_context_in, in,
1363                  field_select.min_wqe_inline_mode, 1);
1364         MLX5_SET(modify_nic_vport_context_in, in,
1365                  nic_vport_context.min_wqe_inline_mode, min_header);
1366         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1367         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1368
1369         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1370
1371         kvfree(in);
1372         return err;
1373 }
1374 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1375
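/*
 * Report the minimum WQE inline mode, dispatching to the HCA (IB) or
 * NIC (Ethernet) vport context depending on the port type.
 */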
1376 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1377 {
1378         switch (MLX5_CAP_GEN(dev, port_type)) {
1379         case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1380                 return mlx5_query_hca_min_wqe_header(dev, min_header);
1381
1382         case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1383                 return mlx5_query_vport_min_wqe_header(dev, min_header);
1384
1385         default:
1386                 return -EINVAL;
1387         }
1388 }
1389 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1390
1391 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1392                                  u16 vport,
1393                                  int *promisc_uc,
1394                                  int *promisc_mc,
1395                                  int *promisc_all)
1396 {
1397         u32 *out;
1398         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1399         int err;
1400
1401         out = kzalloc(outlen, GFP_KERNEL);
1402         if (!out)
1403                 return -ENOMEM;
1404
1405         err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1406         if (err)
1407                 goto out;
1408
1409         *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1410                                nic_vport_context.promisc_uc);
1411         *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1412                                nic_vport_context.promisc_mc);
1413         *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1414                                 nic_vport_context.promisc_all);
1415
1416 out:
1417         kfree(out);
1418         return err;
1419 }
1420 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1421
1422 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1423                                   int promisc_uc,
1424                                   int promisc_mc,
1425                                   int promisc_all)
1426 {
1427         void *in;
1428         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1429         int err;
1430
1431         in = mlx5_vzalloc(inlen);
1432         if (!in) {
1433                 mlx5_core_err(mdev, "failed to allocate inbox\n");
1434                 return -ENOMEM;
1435         }
1436
1437         MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1438         MLX5_SET(modify_nic_vport_context_in, in,
1439                  nic_vport_context.promisc_uc, promisc_uc);
1440         MLX5_SET(modify_nic_vport_context_in, in,
1441                  nic_vport_context.promisc_mc, promisc_mc);
1442         MLX5_SET(modify_nic_vport_context_in, in,
1443                  nic_vport_context.promisc_all, promisc_all);
1444
1445         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1446         kvfree(in);
1447         return err;
1448 }
1449 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1450
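/*
 * Execute QUERY_VPORT_COUNTER for the given port/vport and leave the
 * raw outbox in "out"; mlx5_get_vport_counters() below extracts the
 * individual counters from it.
 */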
1451 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1452                              u8 port_num, u16 vport_num,
1453                              void *out, int out_size)
1454 {
1455         int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1456         int is_group_manager;
1457         void *in;
1458         int err;
1459
1460         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1461
1462         in = mlx5_vzalloc(in_sz);
1463         if (!in)
1464                 return -ENOMEM;
1465
1466         MLX5_SET(query_vport_counter_in, in, opcode,
1467                  MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1468         if (vport_num) {
1469                 if (is_group_manager) {
1470                         MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1471                         MLX5_SET(query_vport_counter_in, in, vport_number,
1472                                  vport_num);
1473                 } else {
1474                         err = -EPERM;
1475                         goto ex;
1476                 }
1477         }
1478         if (MLX5_CAP_GEN(dev, num_ports) == 2)
1479                 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1480
1481         err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);
1482         if (err)
1483                 goto ex;
1484         err = mlx5_cmd_status_to_err_v2(out);
1485         if (err)
1486                 goto ex;
1487
1488 ex:
1489         kvfree(in);
1490         return err;
1491 }
1492 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1493
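/*
 * Snapshot all IB and Ethernet traffic counters of the local vport into
 * a struct mlx5_vport_counters.
 */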
1494 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1495                             struct mlx5_vport_counters *vc)
1496 {
1497         int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1498         void *out;
1499         int err;
1500
1501         out = mlx5_vzalloc(out_sz);
1502         if (!out)
1503                 return -ENOMEM;
1504
1505         err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1506         if (err)
1507                 goto ex;
1508
1509         vc->received_errors.packets =
1510                 MLX5_GET64(query_vport_counter_out,
1511                            out, received_errors.packets);
1512         vc->received_errors.octets =
1513                 MLX5_GET64(query_vport_counter_out,
1514                            out, received_errors.octets);
1515         vc->transmit_errors.packets =
1516                 MLX5_GET64(query_vport_counter_out,
1517                            out, transmit_errors.packets);
1518         vc->transmit_errors.octets =
1519                 MLX5_GET64(query_vport_counter_out,
1520                            out, transmit_errors.octets);
1521         vc->received_ib_unicast.packets =
1522                 MLX5_GET64(query_vport_counter_out,
1523                            out, received_ib_unicast.packets);
1524         vc->received_ib_unicast.octets =
1525                 MLX5_GET64(query_vport_counter_out,
1526                            out, received_ib_unicast.octets);
1527         vc->transmitted_ib_unicast.packets =
1528                 MLX5_GET64(query_vport_counter_out,
1529                            out, transmitted_ib_unicast.packets);
1530         vc->transmitted_ib_unicast.octets =
1531                 MLX5_GET64(query_vport_counter_out,
1532                            out, transmitted_ib_unicast.octets);
1533         vc->received_ib_multicast.packets =
1534                 MLX5_GET64(query_vport_counter_out,
1535                            out, received_ib_multicast.packets);
1536         vc->received_ib_multicast.octets =
1537                 MLX5_GET64(query_vport_counter_out,
1538                            out, received_ib_multicast.octets);
1539         vc->transmitted_ib_multicast.packets =
1540                 MLX5_GET64(query_vport_counter_out,
1541                            out, transmitted_ib_multicast.packets);
1542         vc->transmitted_ib_multicast.octets =
1543                 MLX5_GET64(query_vport_counter_out,
1544                            out, transmitted_ib_multicast.octets);
1545         vc->received_eth_broadcast.packets =
1546                 MLX5_GET64(query_vport_counter_out,
1547                            out, received_eth_broadcast.packets);
1548         vc->received_eth_broadcast.octets =
1549                 MLX5_GET64(query_vport_counter_out,
1550                            out, received_eth_broadcast.octets);
1551         vc->transmitted_eth_broadcast.packets =
1552                 MLX5_GET64(query_vport_counter_out,
1553                            out, transmitted_eth_broadcast.packets);
1554         vc->transmitted_eth_broadcast.octets =
1555                 MLX5_GET64(query_vport_counter_out,
1556                            out, transmitted_eth_broadcast.octets);
1557         vc->received_eth_unicast.octets =
1558                 MLX5_GET64(query_vport_counter_out,
1559                            out, received_eth_unicast.octets);
1560         vc->received_eth_unicast.packets =
1561                 MLX5_GET64(query_vport_counter_out,
1562                            out, received_eth_unicast.packets);
1563         vc->transmitted_eth_unicast.octets =
1564                 MLX5_GET64(query_vport_counter_out,
1565                            out, transmitted_eth_unicast.octets);
1566         vc->transmitted_eth_unicast.packets =
1567                 MLX5_GET64(query_vport_counter_out,
1568                            out, transmitted_eth_unicast.packets);
1569         vc->received_eth_multicast.octets =
1570                 MLX5_GET64(query_vport_counter_out,
1571                            out, received_eth_multicast.octets);
1572         vc->received_eth_multicast.packets =
1573                 MLX5_GET64(query_vport_counter_out,
1574                            out, received_eth_multicast.packets);
1575         vc->transmitted_eth_multicast.octets =
1576                 MLX5_GET64(query_vport_counter_out,
1577                            out, transmitted_eth_multicast.octets);
1578         vc->transmitted_eth_multicast.packets =
1579                 MLX5_GET64(query_vport_counter_out,
1580                            out, transmitted_eth_multicast.packets);
1581
1582 ex:
1583         kvfree(out);
1584         return err;
1585 }
1586
1587 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1588                                        u64 *sys_image_guid)
1589 {
1590         switch (MLX5_CAP_GEN(dev, port_type)) {
1591         case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1592                 return mlx5_query_hca_vport_system_image_guid(dev,
1593                                                               sys_image_guid);
1594
1595         case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1596                 return mlx5_query_nic_vport_system_image_guid(dev,
1597                                                               sys_image_guid);
1598
1599         default:
1600                 return -EINVAL;
1601         }
1602 }
1603 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1604
1605 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1606 {
1607         switch (MLX5_CAP_GEN(dev, port_type)) {
1608         case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1609                 return mlx5_query_hca_vport_node_guid(dev, node_guid);
1610
1611         case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1612                 return mlx5_query_nic_vport_node_guid(dev, node_guid);
1613
1614         default:
1615                 return -EINVAL;
1616         }
1617 }
1618 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1619
1620 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1621 {
1622         switch (MLX5_CAP_GEN(dev, port_type)) {
1623         case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1624                 return mlx5_query_hca_vport_port_guid(dev, port_guid);
1625
1626         case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1627                 return mlx5_query_nic_vport_port_guid(dev, port_guid);
1628
1629         default:
1630                 return -EINVAL;
1631         }
1632 }
1633 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1634
1635 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1636 {
1637         u32 *out;
1638         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1639         int err;
1640
1641         out = mlx5_vzalloc(outlen);
1642         if (!out)
1643                 return -ENOMEM;
1644
1645         err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1646         if (err)
1647                 goto out;
1648
1649         *vport_state = MLX5_GET(query_hca_vport_context_out, out,
1650                                 hca_vport_context.vport_state);
1651
1652 out:
1653         kvfree(out);
1654         return err;
1655 }
1656 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);