1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
32
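/*
 * Query the operational state of a vport via the QUERY_VPORT_STATE
 * command.  The opmod argument is passed straight through as op_mod and
 * appears to select which kind of port (NIC vport vs. uplink) is queried;
 * on command failure only a warning is logged and whatever is in the
 * output buffer's state field is returned.
 */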
33 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
34 {
35         u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
36         u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
37         int err;
38
39         memset(in, 0, sizeof(in));
           memset(out, 0, sizeof(out));
40
41         MLX5_SET(query_vport_state_in, in, opcode,
42                  MLX5_CMD_OP_QUERY_VPORT_STATE);
43         MLX5_SET(query_vport_state_in, in, op_mod, opmod);
44
45         err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
46                                          sizeof(out));
47         if (err)
48                 mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
49
50         return MLX5_GET(query_vport_state_out, out, state);
51 }
52 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
53
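/*
 * Read the NIC vport context of the given vport into the caller supplied
 * output buffer.  A non-zero vport means another function's vport is being
 * queried, so the other_vport bit is set accordingly.
 */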
54 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
55                                         u32 *out, int outlen)
56 {
57         u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
58
59         memset(in, 0, sizeof(in));
60
61         MLX5_SET(query_nic_vport_context_in, in, opcode,
62                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
63
64         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
65         if (vport)
66                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
67
68         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
69 }
70
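/*
 * Allocate a queue counter set and return its id in *counter_set_id.
 * The matching mlx5_vport_dealloc_q_counter() below releases it.
 */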
71 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
72 {
73         u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
74         u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
75         int err;
76
77         memset(in, 0, sizeof(in));
78         memset(out, 0, sizeof(out));
79
80         MLX5_SET(alloc_q_counter_in, in, opcode,
81                  MLX5_CMD_OP_ALLOC_Q_COUNTER);
82
83         err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
84                                          out, sizeof(out));
85
86         if (err)
87                 return err;
88
89         *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
90                                    counter_set_id);
91         return err;
92 }
93
94 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
95                                  int counter_set_id)
96 {
97         u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
98         u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
99
100         memset(in, 0, sizeof(in));
101         memset(out, 0, sizeof(out));
102
103         MLX5_SET(dealloc_q_counter_in, in, opcode,
104                  MLX5_CMD_OP_DEALLOC_Q_COUNTER);
105         MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
106                  counter_set_id);
107
108         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
109                                           out, sizeof(out));
110 }
111
112 static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
113                                       int counter_set_id,
114                                       int reset,
115                                       void *out,
116                                       int out_size)
117 {
118         u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
119
120         memset(in, 0, sizeof(in));
121
122         MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
123         MLX5_SET(query_q_counter_in, in, clear, reset);
124         MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
125
126         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
127                                           out, out_size);
128 }
129
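/*
 * Convenience wrapper around mlx5_vport_query_q_counter(): read the
 * out_of_buffer count (packets dropped for lack of receive buffers) of
 * the given counter set without clearing it.
 */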
130 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
131                                       int counter_set_id,
132                                       u32 *out_of_rx_buffer)
133 {
134         u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
135         int err;
136
137         memset(out, 0, sizeof(out));
138
139         err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
140                                          sizeof(out));
141
142         if (err)
143                 return err;
144
145         *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
146                                      out_of_buffer);
147         return err;
148 }
149
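/*
 * Return the permanent MAC address of a vport.  The permanent_address
 * field is 8 bytes wide with the 6-byte MAC in its low bytes, hence the
 * copy from offset 2 of the field.
 */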
150 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
151                                      u32 vport, u8 *addr)
152 {
153         u32 *out;
154         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
155         u8 *out_addr;
156         int err;
157
158         out = mlx5_vzalloc(outlen);
159         if (!out)
160                 return -ENOMEM;
161
162         out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
163                                 nic_vport_context.permanent_address);
164
165         err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
166         if (err)
167                 goto out;
168
169         ether_addr_copy(addr, &out_addr[2]);
170
171 out:
172         kvfree(out);
173         return err;
174 }
175 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
176
177 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
178                                            u64 *system_image_guid)
179 {
180         u32 *out;
181         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
182         int err;
183
184         out = mlx5_vzalloc(outlen);
185         if (!out)
186                 return -ENOMEM;
187
188         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
189         if (err)
190                 goto out;
191
192         *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
193                                         nic_vport_context.system_image_guid);
194 out:
195         kvfree(out);
196         return err;
197 }
198 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
199
200 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
201 {
202         u32 *out;
203         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
204         int err;
205
206         out = mlx5_vzalloc(outlen);
207         if (!out)
208                 return -ENOMEM;
209
210         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
211         if (err)
212                 goto out;
213
214         *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
215                                 nic_vport_context.node_guid);
216
217 out:
218         kvfree(out);
219         return err;
220 }
221 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
222
223 int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
224 {
225         u32 *out;
226         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
227         int err;
228
229         out = mlx5_vzalloc(outlen);
230         if (!out)
231                 return -ENOMEM;
232
233         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
234         if (err)
235                 goto out;
236
237         *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
238                                 nic_vport_context.port_guid);
239
240 out:
241         kvfree(out);
242         return err;
243 }
244 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
245
246 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
247                                         u16 *qkey_viol_cntr)
248 {
249         u32 *out;
250         int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
251         int err;
252
253         out = mlx5_vzalloc(outlen);
254         if (!out)
255                 return -ENOMEM;
256
257         err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
258         if (err)
259                 goto out;
260
261         *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
262                                 nic_vport_context.qkey_violation_counter);
263
264 out:
265         kvfree(out);
266         return err;
267 }
268 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
269
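/*
 * Common helper for all NIC vport context modifications: stamp the opcode
 * into the caller prepared input mailbox and execute the command.
 */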
270 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
271                                          int inlen)
272 {
273         u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
274
275         MLX5_SET(modify_nic_vport_context_in, in, opcode,
276                  MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
277
278         memset(out, 0, sizeof(out));
279         return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
280 }
281
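/*
 * Toggle RoCE on the local NIC vport by modifying only the roce_en bit of
 * the vport context.
 */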
282 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
283                                               int enable_disable)
284 {
285         void *in;
286         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
287         int err;
288
289         in = mlx5_vzalloc(inlen);
290         if (!in) {
291                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
292                 return -ENOMEM;
293         }
294
295         MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
296         MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
297                  enable_disable);
298
299         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
300
301         kvfree(in);
302
303         return err;
304 }
305
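/*
 * Program the current unicast MAC of a vport by writing a one-entry
 * allowed unicast address list into the vport context.
 */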
306 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
307                                    bool other_vport, u8 *addr)
308 {
309         void *in;
310         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
311                   + MLX5_ST_SZ_BYTES(mac_address_layout);
312         u8  *mac_layout;
313         u8  *mac_ptr;
314         int err;
315
316         in = mlx5_vzalloc(inlen);
317         if (!in) {
318                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
319                 return -ENOMEM;
320         }
321
322         MLX5_SET(modify_nic_vport_context_in, in,
323                  opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
324         MLX5_SET(modify_nic_vport_context_in, in,
325                  vport_number, vport);
326         MLX5_SET(modify_nic_vport_context_in, in,
327                  other_vport, other_vport);
328         MLX5_SET(modify_nic_vport_context_in, in,
329                  field_select.addresses_list, 1);
330         MLX5_SET(modify_nic_vport_context_in, in,
331                  nic_vport_context.allowed_list_type,
332                  MLX5_NIC_VPORT_LIST_TYPE_UC);
333         MLX5_SET(modify_nic_vport_context_in, in,
334                  nic_vport_context.allowed_list_size, 1);
335
336         mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
337                 nic_vport_context.current_uc_mac_address);
338         mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
339                 mac_addr_47_32);
340         ether_addr_copy(mac_ptr, addr);
341
342         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
343
344         kvfree(in);
345
346         return err;
347 }
348 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
349
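/*
 * Install the allowed VLAN list of a vport.  The VLAN entries are written
 * into the same allowed-list area that holds MAC entries; the
 * allowed_list_type field tells the device to interpret them as VLANs.
 * The list may not exceed the device's log_max_vlan_list capability.
 */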
350 int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
351                                  u16 *vlan_list, int list_len)
352 {
353         void *in, *ctx;
354         int i, err;
355         int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
356                 + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
357
358         int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
359
360         if (list_len > max_list_size) {
361                 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
362                                list_len, max_list_size);
363                 return -ENOSPC;
364         }
365
366         in = mlx5_vzalloc(inlen);
367         if (!in) {
368                 mlx5_core_warn(dev, "failed to allocate inbox\n");
369                 return -ENOMEM;
370         }
371
372         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
373         if (vport)
374                 MLX5_SET(modify_nic_vport_context_in, in,
375                          other_vport, 1);
376         MLX5_SET(modify_nic_vport_context_in, in,
377                  field_select.addresses_list, 1);
378
379         ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
380
381         MLX5_SET(nic_vport_context, ctx, allowed_list_type,
382                  MLX5_NIC_VPORT_LIST_TYPE_VLAN);
383         MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
384
385         for (i = 0; i < list_len; i++) {
386                 u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
387                                          current_uc_mac_address[i]);
388                 MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
389         }
390
391         err = mlx5_modify_nic_vport_context(dev, in, inlen);
392
393         kvfree(in);
394         return err;
395 }
396 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
397
398 int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
399                                u64 *addr_list, size_t addr_list_len)
400 {
401         void *in, *ctx;
402         int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
403                   + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
404         int err;
405         size_t i;
406         int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
407
408         if ((int)addr_list_len > max_list_sz) {
409                 mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
410                                (int)addr_list_len, max_list_sz);
411                 return -ENOSPC;
412         }
413
414         in = mlx5_vzalloc(inlen);
415         if (!in) {
416                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
417                 return -ENOMEM;
418         }
419
420         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
421         if (vport)
422                 MLX5_SET(modify_nic_vport_context_in, in,
423                          other_vport, 1);
424         MLX5_SET(modify_nic_vport_context_in, in,
425                  field_select.addresses_list, 1);
426
427         ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
428
429         MLX5_SET(nic_vport_context, ctx, allowed_list_type,
430                  MLX5_NIC_VPORT_LIST_TYPE_MC);
431         MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
432
433         for (i = 0; i < addr_list_len; i++) {
434                 u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
435                                                   current_uc_mac_address[i]);
436                 u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
437                                                  mac_addr_47_32);
438                 ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
439         }
440
441         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
442
443         kvfree(in);
444
445         return err;
446 }
447 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
448
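/*
 * Set the unicast, multicast and all-packet promiscuous flags of a vport
 * in a single vport context modification.
 */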
449 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
450                                bool promisc_mc, bool promisc_uc,
451                                bool promisc_all)
452 {
453         u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
454         u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
455                                nic_vport_context);
456
457         memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
458
459         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
460         if (vport)
461                 MLX5_SET(modify_nic_vport_context_in, in,
462                          other_vport, 1);
463         MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
464         if (promisc_mc)
465                 MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
466         if (promisc_uc)
467                 MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
468         if (promisc_all)
469                 MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
470
471         return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
472 }
473 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
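
/*
 * Set the permanent MAC address of another vport.  Note that other_vport
 * is set unconditionally here, so this is intended for managing vports
 * other than the caller's own.
 */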
474 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
475                                      u8 *addr)
476 {
477         void *in;
478         int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
479         u8  *mac_ptr;
480         int err;
481
482         in = mlx5_vzalloc(inlen);
483         if (!in) {
484                 mlx5_core_warn(mdev, "failed to allocate inbox\n");
485                 return -ENOMEM;
486         }
487
488         MLX5_SET(modify_nic_vport_context_in, in,
489                  opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
490         MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
491         MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
492         MLX5_SET(modify_nic_vport_context_in, in,
493                  field_select.permanent_address, 1);
494         mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
495                 nic_vport_context.permanent_address.mac_addr_47_32);
496         ether_addr_copy(mac_ptr, addr);
497
498         err = mlx5_modify_nic_vport_context(mdev, in, inlen);
499
500         kvfree(in);
501
502         return err;
503 }
504 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
505
506 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
507 {
508         return mlx5_nic_vport_enable_disable_roce(mdev, 1);
509 }
510 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
511
512 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
513 {
514         return mlx5_nic_vport_enable_disable_roce(mdev, 0);
515 }
516 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
517
518 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
519                                  u8 port_num, u8 vport_num, u32 *out,
520                                  int outlen)
521 {
522         u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
523         int is_group_manager;
524
525         is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
526
527         memset(in, 0, sizeof(in));
528
529         MLX5_SET(query_hca_vport_context_in, in, opcode,
530                  MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
531
532         if (vport_num) {
533                 if (is_group_manager) {
534                         MLX5_SET(query_hca_vport_context_in, in, other_vport,
535                                  1);
536                         MLX5_SET(query_hca_vport_context_in, in, vport_number,
537                                  vport_num);
538                 } else {
539                         return -EPERM;
540                 }
541         }
542
543         if (MLX5_CAP_GEN(mdev, num_ports) == 2)
544                 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
545
546         return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
547 }
548
549 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
550                                            u64 *system_image_guid)
551 {
552         u32 *out;
553         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
554         int err;
555
556         out = mlx5_vzalloc(outlen);
557         if (!out)
558                 return -ENOMEM;
559
560         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
561         if (err)
562                 goto out;
563
564         *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
565                                         hca_vport_context.system_image_guid);
566
567 out:
568         kvfree(out);
569         return err;
570 }
571 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
572
573 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
574 {
575         u32 *out;
576         int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
577         int err;
578
579         out = mlx5_vzalloc(outlen);
580         if (!out)
581                 return -ENOMEM;
582
583         err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
584         if (err)
585                 goto out;
586
587         *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
588                                 hca_vport_context.node_guid);
589
590 out:
591         kvfree(out);
592         return err;
593 }
594 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
595
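/*
 * Query the GID table of an HCA vport.  A gid_index of 0xffff requests
 * the whole table, although only the subnet prefix and interface id of
 * the first returned GID are copied back to the caller.
 */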
596 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
597                              u16 vport_num, u16 gid_index, union ib_gid *gid)
598 {
599         int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
600         int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
601         int is_group_manager;
602         void *out = NULL;
603         void *in = NULL;
604         union ib_gid *tmp;
605         int tbsz;
606         int nout;
607         int err;
608
609         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
610         tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
611
612         if (gid_index > tbsz && gid_index != 0xffff)
613                 return -EINVAL;
614
615         if (gid_index == 0xffff)
616                 nout = tbsz;
617         else
618                 nout = 1;
619
620         out_sz += nout * sizeof(*gid);
621
622         in = mlx5_vzalloc(in_sz);
623         out = mlx5_vzalloc(out_sz);
624         if (!in || !out) {
625                 err = -ENOMEM;
626                 goto out;
627         }
628
629         MLX5_SET(query_hca_vport_gid_in, in, opcode,
630                  MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
631         if (vport_num) {
632                 if (is_group_manager) {
633                         MLX5_SET(query_hca_vport_gid_in, in, vport_number,
634                                  vport_num);
635                         MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
636                 } else {
637                         err = -EPERM;
638                         goto out;
639                 }
640         }
641
642         MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
643
644         if (MLX5_CAP_GEN(dev, num_ports) == 2)
645                 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
646
647         err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
648         if (err)
649                 goto out;
650
651         err = mlx5_cmd_status_to_err_v2(out);
652         if (err)
653                 goto out;
654
655         tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
656         gid->global.subnet_prefix = tmp->global.subnet_prefix;
657         gid->global.interface_id = tmp->global.interface_id;
658
659 out:
660         kvfree(in);
661         kvfree(out);
662         return err;
663 }
664 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
665
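/*
 * Query one P_Key (or, with pkey_index 0xffff, the whole P_Key table) of
 * an HCA vport and copy the result into the caller's pkey array.
 */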
666 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
667                               u8 port_num, u16 vf_num, u16 pkey_index,
668                               u16 *pkey)
669 {
670         int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
671         int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
672         int is_group_manager;
673         void *out = NULL;
674         void *in = NULL;
675         void *pkarr;
676         int nout;
677         int tbsz;
678         int err;
679         int i;
680
681         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
682
683         tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
684         if (pkey_index > tbsz && pkey_index != 0xffff)
685                 return -EINVAL;
686
687         if (pkey_index == 0xffff)
688                 nout = tbsz;
689         else
690                 nout = 1;
691
692         out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
693
694         in = kzalloc(in_sz, GFP_KERNEL);
695         out = kzalloc(out_sz, GFP_KERNEL);
            if (!in || !out) {
                    err = -ENOMEM;
                    goto out;
            }
696
697         MLX5_SET(query_hca_vport_pkey_in, in, opcode,
698                  MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
699         if (other_vport) {
700                 if (is_group_manager) {
701                         MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
702                                  vf_num);
703                         MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
704                 } else {
705                         err = -EPERM;
706                         goto out;
707                 }
708         }
709         MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
710
711         if (MLX5_CAP_GEN(dev, num_ports) == 2)
712                 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
713
714         err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
715         if (err)
716                 goto out;
717
718         err = mlx5_cmd_status_to_err_v2(out);
719         if (err)
720                 goto out;
721
722         pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
723         for (i = 0; i < nout; i++, pkey++,
724              pkarr += MLX5_ST_SZ_BYTES(pkey))
725                 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
726
727 out:
728         kfree(in);
729         kfree(out);
730         return err;
731 }
732 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
733
734 static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
735                                              u16 vport, void *in, int inlen)
736 {
737         u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
738         int err;
739
740         memset(out, 0, sizeof(out));
741
742         MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
743         if (vport)
744                 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
745
746         MLX5_SET(modify_esw_vport_context_in, in, opcode,
747                  MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
748
749         err = mlx5_cmd_exec_check_status(mdev, in, inlen,
750                                          out, sizeof(out));
751         if (err)
752                 mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
753
754         return err;
755 }
756
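/*
 * Configure C-VLAN insertion and stripping for an eswitch vport.  The
 * VLAN id/cfi/pcp values are only meaningful when insertion is enabled,
 * so they are written only for insert modes other than NONE.
 */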
757 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
758                                 u8 insert_mode, u8 strip_mode,
759                                 u16 vlan, u8 cfi, u8 pcp)
760 {
761         u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
762
763         memset(in, 0, sizeof(in));
764
765         if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
766                 MLX5_SET(modify_esw_vport_context_in, in,
767                          esw_vport_context.cvlan_cfi, cfi);
768                 MLX5_SET(modify_esw_vport_context_in, in,
769                          esw_vport_context.cvlan_pcp, pcp);
770                 MLX5_SET(modify_esw_vport_context_in, in,
771                          esw_vport_context.cvlan_id, vlan);
772         }
773
774         MLX5_SET(modify_esw_vport_context_in, in,
775                  esw_vport_context.vport_cvlan_insert, insert_mode);
776
777         MLX5_SET(modify_esw_vport_context_in, in,
778                  esw_vport_context.vport_cvlan_strip, strip_mode);
779
780         MLX5_SET(modify_esw_vport_context_in, in, field_select,
781                  MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
782                  MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
783
784         return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
785 }
786 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
787
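/*
 * Execute QUERY_VPORT_COUNTER for the given port/vport and return the raw
 * command output in the caller supplied buffer; querying another vport
 * requires the vport_group_manager capability.
 */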
788 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
789                              u8 port_num, u16 vport_num,
790                              void *out, int out_size)
791 {
792         int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
793         int is_group_manager;
794         void *in;
795         int err;
796
797         is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
798
799         in = mlx5_vzalloc(in_sz);
800         if (!in)
801                 return -ENOMEM;
802
803         MLX5_SET(query_vport_counter_in, in, opcode,
804                  MLX5_CMD_OP_QUERY_VPORT_COUNTER);
805         if (vport_num) {
806                 if (is_group_manager) {
807                         MLX5_SET(query_vport_counter_in, in, other_vport, 1);
808                         MLX5_SET(query_vport_counter_in, in, vport_number,
809                                  vport_num);
810                 } else {
811                         err = -EPERM;
812                         goto ex;
813                 }
814         }
815         if (MLX5_CAP_GEN(dev, num_ports) == 2)
816                 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
817
818         err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);
819         if (err)
820                 goto ex;
821         err = mlx5_cmd_status_to_err_v2(out);
822         if (err)
823                 goto ex;
824
825 ex:
826         kvfree(in);
827         return err;
828 }
829 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
830
831 int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
832                             struct mlx5_vport_counters *vc)
833 {
834         int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
835         void *out;
836         int err;
837
838         out = mlx5_vzalloc(out_sz);
839         if (!out)
840                 return -ENOMEM;
841
842         err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
843         if (err)
844                 goto ex;
845
846         vc->received_errors.packets =
847                 MLX5_GET64(query_vport_counter_out,
848                            out, received_errors.packets);
849         vc->received_errors.octets =
850                 MLX5_GET64(query_vport_counter_out,
851                            out, received_errors.octets);
852         vc->transmit_errors.packets =
853                 MLX5_GET64(query_vport_counter_out,
854                            out, transmit_errors.packets);
855         vc->transmit_errors.octets =
856                 MLX5_GET64(query_vport_counter_out,
857                            out, transmit_errors.octets);
858         vc->received_ib_unicast.packets =
859                 MLX5_GET64(query_vport_counter_out,
860                            out, received_ib_unicast.packets);
861         vc->received_ib_unicast.octets =
862                 MLX5_GET64(query_vport_counter_out,
863                            out, received_ib_unicast.octets);
864         vc->transmitted_ib_unicast.packets =
865                 MLX5_GET64(query_vport_counter_out,
866                            out, transmitted_ib_unicast.packets);
867         vc->transmitted_ib_unicast.octets =
868                 MLX5_GET64(query_vport_counter_out,
869                            out, transmitted_ib_unicast.octets);
870         vc->received_ib_multicast.packets =
871                 MLX5_GET64(query_vport_counter_out,
872                            out, received_ib_multicast.packets);
873         vc->received_ib_multicast.octets =
874                 MLX5_GET64(query_vport_counter_out,
875                            out, received_ib_multicast.octets);
876         vc->transmitted_ib_multicast.packets =
877                 MLX5_GET64(query_vport_counter_out,
878                            out, transmitted_ib_multicast.packets);
879         vc->transmitted_ib_multicast.octets =
880                 MLX5_GET64(query_vport_counter_out,
881                            out, transmitted_ib_multicast.octets);
882         vc->received_eth_broadcast.packets =
883                 MLX5_GET64(query_vport_counter_out,
884                            out, received_eth_broadcast.packets);
885         vc->received_eth_broadcast.octets =
886                 MLX5_GET64(query_vport_counter_out,
887                            out, received_eth_broadcast.octets);
888         vc->transmitted_eth_broadcast.packets =
889                 MLX5_GET64(query_vport_counter_out,
890                            out, transmitted_eth_broadcast.packets);
891         vc->transmitted_eth_broadcast.octets =
892                 MLX5_GET64(query_vport_counter_out,
893                            out, transmitted_eth_broadcast.octets);
894         vc->received_eth_unicast.octets =
895                 MLX5_GET64(query_vport_counter_out,
896                            out, received_eth_unicast.octets);
897         vc->received_eth_unicast.packets =
898                 MLX5_GET64(query_vport_counter_out,
899                            out, received_eth_unicast.packets);
900         vc->transmitted_eth_unicast.octets =
901                 MLX5_GET64(query_vport_counter_out,
902                            out, transmitted_eth_unicast.octets);
903         vc->transmitted_eth_unicast.packets =
904                 MLX5_GET64(query_vport_counter_out,
905                            out, transmitted_eth_unicast.packets);
906         vc->received_eth_multicast.octets =
907                 MLX5_GET64(query_vport_counter_out,
908                            out, received_eth_multicast.octets);
909         vc->received_eth_multicast.packets =
910                 MLX5_GET64(query_vport_counter_out,
911                            out, received_eth_multicast.packets);
912         vc->transmitted_eth_multicast.octets =
913                 MLX5_GET64(query_vport_counter_out,
914                            out, transmitted_eth_multicast.octets);
915         vc->transmitted_eth_multicast.packets =
916                 MLX5_GET64(query_vport_counter_out,
917                            out, transmitted_eth_multicast.packets);
918
919 ex:
920         kvfree(out);
921         return err;
922 }