FreeBSD stable/9: sys/ofed/drivers/net/mlx4/en_ethtool.c
1 /*
2  * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33
34 #include <linux/kernel.h>
35 #include <linux/ethtool.h>
36 #include <linux/netdevice.h>
37 #include <linux/mlx4/driver.h>
38 #include <linux/in.h>
39 #include <net/ip.h>
40 #include <linux/bitmap.h>
41
42 #include "mlx4_en.h"
43 #include "en_port.h"
44
45 #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
46
47 union mlx4_ethtool_flow_union {
48         struct ethtool_tcpip4_spec              tcp_ip4_spec;
49         struct ethtool_tcpip4_spec              udp_ip4_spec;
50         struct ethtool_tcpip4_spec              sctp_ip4_spec;
51         struct ethtool_ah_espip4_spec           ah_ip4_spec;
52         struct ethtool_ah_espip4_spec           esp_ip4_spec;
53         struct ethtool_usrip4_spec              usr_ip4_spec;
54         struct ethhdr                           ether_spec;
55         __u8                                    hdata[52];
56 };
57
58 struct mlx4_ethtool_flow_ext {
59         __u8            padding[2];
60         unsigned char   h_dest[ETH_ALEN];
61         __be16          vlan_etype;
62         __be16          vlan_tci;
63         __be32          data[2];
64 };
65
66 struct mlx4_ethtool_rx_flow_spec {
67         __u32           flow_type;
68         union mlx4_ethtool_flow_union h_u;
69         struct mlx4_ethtool_flow_ext h_ext;
70         union mlx4_ethtool_flow_union m_u;
71         struct mlx4_ethtool_flow_ext m_ext;
72         __u64           ring_cookie;
73         __u32           location;
74 };
75
76 struct mlx4_ethtool_rxnfc {
77         __u32                           cmd;
78         __u32                           flow_type;
79         __u64                           data;
80         struct mlx4_ethtool_rx_flow_spec        fs;
81         __u32                           rule_cnt;
82         __u32                           rule_locs[0];
83 };
84
85 #ifndef FLOW_MAC_EXT
86 #define FLOW_MAC_EXT    0x40000000
87 #endif
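/*
 * The definitions above are local mirrors of the ethtool flow-spec layout
 * (ethtool_rx_flow_spec / ethtool_rxnfc).  Keeping a private copy lets the
 * ETHTOOL_[GS]RXCLS* handlers further down parse user requests against a
 * fixed layout rather than whatever the compat <linux/ethtool.h> happens to
 * provide; hdata[52] is sized to cover the largest per-flow key in the union.
 */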
88
89 static void
90 mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
91 {
92         struct mlx4_en_priv *priv = netdev_priv(dev);
93         struct mlx4_en_dev *mdev = priv->mdev;
94
95         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
96         strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
97                 sizeof(drvinfo->version));
98         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
99                 "%d.%d.%d",
100                 (u16) (mdev->dev->caps.fw_ver >> 32),
101                 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
102                 (u16) (mdev->dev->caps.fw_ver & 0xffff));
103         strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
104                 sizeof(drvinfo->bus_info));
105         drvinfo->n_stats = 0;
106         drvinfo->regdump_len = 0;
107         drvinfo->eedump_len = 0;
108 }
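/*
 * caps.fw_ver packs the firmware version into a single 64-bit word, as the
 * shifts above show: major in bits 47:32, minor in bits 31:16, sub-minor in
 * bits 15:0.  For example, fw_ver == 0x0002000B0384 is reported as "2.11.900".
 */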
109
110 static const char main_strings[][ETH_GSTRING_LEN] = {
111         /* packet statistics */
112         "rx_packets",
113         "rx_bytes",
114         "rx_multicast_packets",
115         "rx_broadcast_packets",
116         "rx_errors",
117         "rx_dropped",
118         "rx_length_errors",
119         "rx_over_errors",
120         "rx_crc_errors",
121         "rx_jabbers",
122         "rx_in_range_length_error",
123         "rx_out_range_length_error",
124         "rx_lt_64_bytes_packets",
125         "rx_127_bytes_packets",
126         "rx_255_bytes_packets",
127         "rx_511_bytes_packets",
128         "rx_1023_bytes_packets",
129         "rx_1518_bytes_packets",
130         "rx_1522_bytes_packets",
131         "rx_1548_bytes_packets",
132         "rx_gt_1548_bytes_packets",
133         "tx_packets",
134         "tx_bytes",
135         "tx_multicast_packets",
136         "tx_broadcast_packets",
137         "tx_errors",
138         "tx_dropped",
139         "tx_lt_64_bytes_packets",
140         "tx_127_bytes_packets",
141         "tx_255_bytes_packets",
142         "tx_511_bytes_packets",
143         "tx_1023_bytes_packets",
144         "tx_1518_bytes_packets",
145         "tx_1522_bytes_packets",
146         "tx_1548_bytes_packets",
147         "tx_gt_1548_bytes_packets",
148         "rx_prio_0_packets", "rx_prio_0_bytes",
149         "rx_prio_1_packets", "rx_prio_1_bytes",
150         "rx_prio_2_packets", "rx_prio_2_bytes",
151         "rx_prio_3_packets", "rx_prio_3_bytes",
152         "rx_prio_4_packets", "rx_prio_4_bytes",
153         "rx_prio_5_packets", "rx_prio_5_bytes",
154         "rx_prio_6_packets", "rx_prio_6_bytes",
155         "rx_prio_7_packets", "rx_prio_7_bytes",
156         "rx_novlan_packets", "rx_novlan_bytes",
157         "tx_prio_0_packets", "tx_prio_0_bytes",
158         "tx_prio_1_packets", "tx_prio_1_bytes",
159         "tx_prio_2_packets", "tx_prio_2_bytes",
160         "tx_prio_3_packets", "tx_prio_3_bytes",
161         "tx_prio_4_packets", "tx_prio_4_bytes",
162         "tx_prio_5_packets", "tx_prio_5_bytes",
163         "tx_prio_6_packets", "tx_prio_6_bytes",
164         "tx_prio_7_packets", "tx_prio_7_bytes",
165         "tx_novlan_packets", "tx_novlan_bytes",
166
167         /* flow control statistics */
168         "rx_pause_prio_0", "rx_pause_duration_prio_0",
169         "rx_pause_transition_prio_0", "tx_pause_prio_0",
170         "tx_pause_duration_prio_0", "tx_pause_transition_prio_0",
171         "rx_pause_prio_1", "rx_pause_duration_prio_1",
172         "rx_pause_transition_prio_1", "tx_pause_prio_1",
173         "tx_pause_duration_prio_1", "tx_pause_transition_prio_1",
174         "rx_pause_prio_2", "rx_pause_duration_prio_2",
175         "rx_pause_transition_prio_2", "tx_pause_prio_2",
176         "tx_pause_duration_prio_2", "tx_pause_transition_prio_2",
177         "rx_pause_prio_3", "rx_pause_duration_prio_3",
178         "rx_pause_transition_prio_3", "tx_pause_prio_3",
179         "tx_pause_duration_prio_3", "tx_pause_transition_prio_3",
180         "rx_pause_prio_4", "rx_pause_duration_prio_4",
181         "rx_pause_transition_prio_4", "tx_pause_prio_4",
182         "tx_pause_duration_prio_4", "tx_pause_transition_prio_4",
183         "rx_pause_prio_5", "rx_pause_duration_prio_5",
184         "rx_pause_transition_prio_5", "tx_pause_prio_5",
185         "tx_pause_duration_prio_5", "tx_pause_transition_prio_5",
186         "rx_pause_prio_6", "rx_pause_duration_prio_6",
187         "rx_pause_transition_prio_6", "tx_pause_prio_6",
188         "tx_pause_duration_prio_6", "tx_pause_transition_prio_6",
189         "rx_pause_prio_7", "rx_pause_duration_prio_7",
190         "rx_pause_transition_prio_7", "tx_pause_prio_7",
191         "tx_pause_duration_prio_7", "tx_pause_transition_prio_7",
192
193         /* VF statistics */
194         "rx_packets",
195         "rx_bytes",
196         "rx_multicast_packets",
197         "rx_broadcast_packets",
198         "rx_errors",
199         "rx_dropped",
200         "tx_packets",
201         "tx_bytes",
202         "tx_multicast_packets",
203         "tx_broadcast_packets",
204         "tx_errors",
205
206         /* VPort statistics */
207         "vport_rx_unicast_packets",
208         "vport_rx_unicast_bytes",
209         "vport_rx_multicast_packets",
210         "vport_rx_multicast_bytes",
211         "vport_rx_broadcast_packets",
212         "vport_rx_broadcast_bytes",
213         "vport_rx_dropped",
214         "vport_rx_errors",
215         "vport_tx_unicast_packets",
216         "vport_tx_unicast_bytes",
217         "vport_tx_multicast_packets",
218         "vport_tx_multicast_bytes",
219         "vport_tx_broadcast_packets",
220         "vport_tx_broadcast_bytes",
221         "vport_tx_errors",
222
223         /* port statistics */
224         "tx_tso_packets",
225         "tx_queue_stopped", "tx_wake_queue", "tx_timeout", "rx_alloc_failed",
226         "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
227 };
228
229 static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
230         "Interrupt Test",
231         "Link Test",
232         "Speed Test",
233         "Register Test",
234         "Loopback Test",
235 };
236
237 static u32 mlx4_en_get_msglevel(struct net_device *dev)
238 {
239         return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
240 }
241
242 static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
243 {
244         ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
245 }
246
247 static void mlx4_en_get_wol(struct net_device *netdev,
248                             struct ethtool_wolinfo *wol)
249 {
250         struct mlx4_en_priv *priv = netdev_priv(netdev);
251         int err = 0;
252         u64 config = 0;
253         u64 mask;
254
255         if ((priv->port < 1) || (priv->port > 2)) {
256                 en_err(priv, "Failed to get WoL information\n");
257                 return;
258         }
259
260         mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
261                 MLX4_DEV_CAP_FLAG_WOL_PORT2;
262
263         if (!(priv->mdev->dev->caps.flags & mask)) {
264                 wol->supported = 0;
265                 wol->wolopts = 0;
266                 return;
267         }
268
269         err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
270         if (err) {
271                 en_err(priv, "Failed to get WoL information\n");
272                 return;
273         }
274
275         if (config & MLX4_EN_WOL_MAGIC)
276                 wol->supported = WAKE_MAGIC;
277         else
278                 wol->supported = 0;
279
280         if (config & MLX4_EN_WOL_ENABLED)
281                 wol->wolopts = WAKE_MAGIC;
282         else
283                 wol->wolopts = 0;
284 }
285
286 static int mlx4_en_set_wol(struct net_device *netdev,
287                             struct ethtool_wolinfo *wol)
288 {
289         struct mlx4_en_priv *priv = netdev_priv(netdev);
290         u64 config = 0;
291         int err = 0;
292         u64 mask;
293
294         if ((priv->port < 1) || (priv->port > 2))
295                 return -EOPNOTSUPP;
296
297         mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
298                 MLX4_DEV_CAP_FLAG_WOL_PORT2;
299
300         if (!(priv->mdev->dev->caps.flags & mask))
301                 return -EOPNOTSUPP;
302
303         if (wol->supported & ~WAKE_MAGIC)
304                 return -EINVAL;
305
306         err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
307         if (err) {
308                 en_err(priv, "Failed to get WoL info, unable to modify\n");
309                 return err;
310         }
311
312         if (wol->wolopts & WAKE_MAGIC) {
313                 config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
314                                 MLX4_EN_WOL_MAGIC;
315         } else {
316                 config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
317                 config |= MLX4_EN_WOL_DO_MODIFY;
318         }
319
320         err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
321         if (err)
322                 en_err(priv, "Failed to set WoL information\n");
323
324         return err;
325 }
326
327 struct bitmap_sim_iterator {
328         bool advance_array;
329         unsigned long *stats_bitmap;
330         unsigned int count;
331         unsigned int j;
332 };
333
334 static inline void bitmap_sim_iterator_init(struct bitmap_sim_iterator *h,
335                                             unsigned long *stats_bitmap,
336                                             int count)
337 {
338         h->j = 0;
339         h->advance_array = !bitmap_empty(stats_bitmap, count);
340         h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
341                 : count;
342         h->stats_bitmap = stats_bitmap;
343 }
344
345 static inline int bitmap_sim_iterator_test(struct bitmap_sim_iterator *h)
346 {
347         return !h->advance_array ? 1 : test_bit(h->j, h->stats_bitmap);
348 }
349
350 static inline int bitmap_sim_iterator_inc(struct bitmap_sim_iterator *h)
351 {
352         return h->j++;
353 }
354
355 static inline unsigned int bitmap_sim_iterator_count(
356                 struct bitmap_sim_iterator *h)
357 {
358         return h->count;
359 }
360
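/*
 * The iterator above gives every stats handler below the same walk order:
 * with an empty stats_bitmap each counter is visited, otherwise only the set
 * bits are.  The canonical usage, mirrored by the loops that follow, is
 * roughly:
 *
 *	bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
 *	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_sim_iterator_inc(&it))
 *		if (bitmap_sim_iterator_test(&it))
 *			data[index++] = ((unsigned long *)&priv->pkstats)[i];
 */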
361 int mlx4_en_get_sset_count(struct net_device *dev, int sset)
362 {
363         struct mlx4_en_priv *priv = netdev_priv(dev);
364         struct bitmap_sim_iterator it;
365
366         int num_of_stats = NUM_ALL_STATS -
367                 ((priv->mdev->dev->caps.flags2 &
368                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
369
370         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
371
372         switch (sset) {
373         case ETH_SS_STATS:
374                 return bitmap_sim_iterator_count(&it) +
375                         (priv->tx_ring_num * 2) +
376 #ifdef LL_EXTENDED_STATS
377                         (priv->rx_ring_num * 5);
378 #else
379                         (priv->rx_ring_num * 2);
380 #endif
381         case ETH_SS_TEST:
382                 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
383                                         & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
384         default:
385                 return -EOPNOTSUPP;
386         }
387 }
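/*
 * Worked example for ETH_SS_STATS: with an empty stats_bitmap, 8 TX rings and
 * 8 RX rings, the count is num_of_stats (NUM_ALL_STATS, or NUM_ALL_STATS -
 * NUM_FLOW_STATS when flow counters are unsupported) + 8 * 2 TX ring counters
 * + 8 * 2 RX ring counters (8 * 5 when built with LL_EXTENDED_STATS).
 */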
388
389 void mlx4_en_get_ethtool_stats(struct net_device *dev,
390                 struct ethtool_stats *stats, u64 *data)
391 {
392         struct mlx4_en_priv *priv = netdev_priv(dev);
393         int index = 0;
394         int i;
395         struct bitmap_sim_iterator it;
396
397         int num_of_stats = NUM_ALL_STATS -
398                 ((priv->mdev->dev->caps.flags2 &
399                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
400
401         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
402
403         if (!data || !priv->port_up)
404                 return;
405
406         spin_lock_bh(&priv->stats_lock);
407
408         for (i = 0; i < NUM_PKT_STATS; i++,
409                         bitmap_sim_iterator_inc(&it))
410                 if (bitmap_sim_iterator_test(&it))
411                         data[index++] =
412                                 ((unsigned long *)&priv->pkstats)[i];
413         for (i = 0; i < NUM_FLOW_STATS; i++,
414                         bitmap_sim_iterator_inc(&it))
415                 if (priv->mdev->dev->caps.flags2 &
416                     MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
417                         if (bitmap_sim_iterator_test(&it))
418                                 data[index++] =
419                                         ((u64 *)&priv->flowstats)[i];
420         for (i = 0; i < NUM_VF_STATS; i++,
421                         bitmap_sim_iterator_inc(&it))
422                 if (bitmap_sim_iterator_test(&it))
423                         data[index++] =
424                                 ((unsigned long *)&priv->vf_stats)[i];
425         for (i = 0; i < NUM_VPORT_STATS; i++,
426                         bitmap_sim_iterator_inc(&it))
427                 if (bitmap_sim_iterator_test(&it))
428                         data[index++] =
429                                 ((unsigned long *)&priv->vport_stats)[i];
430         for (i = 0; i < NUM_PORT_STATS; i++,
431                         bitmap_sim_iterator_inc(&it))
432                 if (bitmap_sim_iterator_test(&it))
433                         data[index++] =
434                                 ((unsigned long *)&priv->port_stats)[i];
435
436         for (i = 0; i < priv->tx_ring_num; i++) {
437                 data[index++] = priv->tx_ring[i]->packets;
438                 data[index++] = priv->tx_ring[i]->bytes;
439         }
440         for (i = 0; i < priv->rx_ring_num; i++) {
441                 data[index++] = priv->rx_ring[i]->packets;
442                 data[index++] = priv->rx_ring[i]->bytes;
443 #ifdef LL_EXTENDED_STATS
444                 data[index++] = priv->rx_ring[i]->yields;
445                 data[index++] = priv->rx_ring[i]->misses;
446                 data[index++] = priv->rx_ring[i]->cleaned;
447 #endif
448         }
449         spin_unlock_bh(&priv->stats_lock);
450
451 }
452
453 void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv, u64 *data)
454 {
455         int index = 0;
456         int i;
457         struct bitmap_sim_iterator it;
458
459         int num_of_stats = NUM_ALL_STATS -
460                 ((priv->mdev->dev->caps.flags2 &
461                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
462
463         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
464
465         if (!data || !priv->port_up)
466                 return;
467
468         spin_lock_bh(&priv->stats_lock);
469
470         for (i = 0; i < NUM_PKT_STATS; i++,
471               bitmap_sim_iterator_inc(&it))
472                         if (bitmap_sim_iterator_test(&it))
473                                 ((unsigned long *)&priv->pkstats)[i] =
474                                         data[index++];
475         for (i = 0; i < NUM_FLOW_STATS; i++,
476               bitmap_sim_iterator_inc(&it))
477                 if (priv->mdev->dev->caps.flags2 &
478                     MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
479                         if (bitmap_sim_iterator_test(&it))
480                                 ((u64 *)&priv->flowstats)[i] =
481                                          data[index++];
482         for (i = 0; i < NUM_VF_STATS; i++,
483               bitmap_sim_iterator_inc(&it))
484                         if (bitmap_sim_iterator_test(&it))
485                                 ((unsigned long *)&priv->vf_stats)[i] =
486                                         data[index++];
487         for (i = 0; i < NUM_VPORT_STATS; i++,
488               bitmap_sim_iterator_inc(&it))
489                         if (bitmap_sim_iterator_test(&it))
490                                 ((unsigned long *)&priv->vport_stats)[i] =
491                                         data[index++];
492         for (i = 0; i < NUM_PORT_STATS; i++,
493               bitmap_sim_iterator_inc(&it))
494                         if (bitmap_sim_iterator_test(&it))
495                                 ((unsigned long *)&priv->port_stats)[i] =
496                                         data[index++];
497
498         for (i = 0; i < priv->tx_ring_num; i++) {
499                 priv->tx_ring[i]->packets = data[index++];
500                 priv->tx_ring[i]->bytes = data[index++];
501         }
502         for (i = 0; i < priv->rx_ring_num; i++) {
503                 priv->rx_ring[i]->packets = data[index++];
504                 priv->rx_ring[i]->bytes = data[index++];
505         }
506         spin_unlock_bh(&priv->stats_lock);
507 }
508
509 static void mlx4_en_self_test(struct net_device *dev,
510                               struct ethtool_test *etest, u64 *buf)
511 {
512         mlx4_en_ex_selftest(dev, &etest->flags, buf);
513 }
514
515 static void mlx4_en_get_strings(struct net_device *dev,
516                                 uint32_t stringset, uint8_t *data)
517 {
518         struct mlx4_en_priv *priv = netdev_priv(dev);
519         int index = 0;
520         int i, k;
521         struct bitmap_sim_iterator it;
522
523         int num_of_stats = NUM_ALL_STATS -
524                 ((priv->mdev->dev->caps.flags2 &
525                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
526
527         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
528
529         switch (stringset) {
530         case ETH_SS_TEST:
531                 for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
532                         strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
533                 if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
534                         for (; i < MLX4_EN_NUM_SELF_TEST; i++)
535                                 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
536                 break;
537
538         case ETH_SS_STATS:
539                 /* Add main counters */
540                 for (i = 0; i < NUM_PKT_STATS; i++,
541                      bitmap_sim_iterator_inc(&it))
542                         if (bitmap_sim_iterator_test(&it))
543                                 strcpy(data + (index++) * ETH_GSTRING_LEN,
544                                        main_strings[i]);
545
546                 for (k = 0; k < NUM_FLOW_STATS; k++,
547                      bitmap_sim_iterator_inc(&it))
548                         if (priv->mdev->dev->caps.flags2 &
549                             MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
550                                 if (bitmap_sim_iterator_test(&it))
551                                         strcpy(data + (index++) *
552                                                ETH_GSTRING_LEN,
553                                                 main_strings[i + k]);
554
555                 for (; (i + k) < num_of_stats; i++,
556                      bitmap_sim_iterator_inc(&it))
557                         if (bitmap_sim_iterator_test(&it))
558                                 strcpy(data + (index++) * ETH_GSTRING_LEN,
559                                        main_strings[i + k]);
560
561                 for (i = 0; i < priv->tx_ring_num; i++) {
562                         sprintf(data + (index++) * ETH_GSTRING_LEN,
563                                 "tx%d_packets", i);
564                         sprintf(data + (index++) * ETH_GSTRING_LEN,
565                                 "tx%d_bytes", i);
566                 }
567                 for (i = 0; i < priv->rx_ring_num; i++) {
568                         sprintf(data + (index++) * ETH_GSTRING_LEN,
569                                 "rx%d_packets", i);
570                         sprintf(data + (index++) * ETH_GSTRING_LEN,
571                                 "rx%d_bytes", i);
572 #ifdef LL_EXTENDED_STATS
573                         sprintf(data + (index++) * ETH_GSTRING_LEN,
574                                 "rx%d_napi_yield", i);
575                         sprintf(data + (index++) * ETH_GSTRING_LEN,
576                                 "rx%d_misses", i);
577                         sprintf(data + (index++) * ETH_GSTRING_LEN,
578                                 "rx%d_cleaned", i);
579 #endif
580                 }
581                 break;
582         }
583 }
584
585 static u32 mlx4_en_autoneg_get(struct net_device *dev)
586 {
587         struct mlx4_en_priv *priv = netdev_priv(dev);
588         struct mlx4_en_dev *mdev = priv->mdev;
589         u32 autoneg = AUTONEG_DISABLE;
590
591         if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
592             priv->port_state.autoneg) {
593                 autoneg = AUTONEG_ENABLE;
594         }
595
596         return autoneg;
597 }
598
599 static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
600 {
601         struct mlx4_en_priv *priv = netdev_priv(dev);
602         int trans_type;
603
604         /* SUPPORTED_1000baseT_Half isn't supported */
605         cmd->supported = SUPPORTED_1000baseT_Full
606                         |SUPPORTED_10000baseT_Full;
607
608         cmd->advertising = ADVERTISED_1000baseT_Full
609                           |ADVERTISED_10000baseT_Full;
610
611         cmd->supported |= SUPPORTED_1000baseKX_Full
612                         |SUPPORTED_10000baseKX4_Full
613                         |SUPPORTED_10000baseKR_Full
614                         |SUPPORTED_10000baseR_FEC
615                         |SUPPORTED_40000baseKR4_Full
616                         |SUPPORTED_40000baseCR4_Full
617                         |SUPPORTED_40000baseSR4_Full
618                         |SUPPORTED_40000baseLR4_Full;
619
620         /* ADVERTISED_1000baseT_Half isn't advertised */
621         cmd->advertising |= ADVERTISED_1000baseKX_Full
622                           |ADVERTISED_10000baseKX4_Full
623                           |ADVERTISED_10000baseKR_Full
624                           |ADVERTISED_10000baseR_FEC
625                           |ADVERTISED_40000baseKR4_Full
626                           |ADVERTISED_40000baseCR4_Full
627                           |ADVERTISED_40000baseSR4_Full
628                           |ADVERTISED_40000baseLR4_Full;
629
630         if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
631                 return -ENOMEM;
632
633         cmd->autoneg = mlx4_en_autoneg_get(dev);
634         if (cmd->autoneg == AUTONEG_ENABLE) {
635                 cmd->supported |= SUPPORTED_Autoneg;
636                 cmd->advertising |= ADVERTISED_Autoneg;
637         }
638
639         trans_type = priv->port_state.transciver;
640         if (netif_carrier_ok(dev)) {
641                 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
642                 cmd->duplex = DUPLEX_FULL;
643         } else {
644                 ethtool_cmd_speed_set(cmd, -1);
645                 cmd->duplex = -1;
646         }
647
648         if (trans_type > 0 && trans_type <= 0xC) {
649                 cmd->port = PORT_FIBRE;
650                 cmd->transceiver = XCVR_EXTERNAL;
651                 cmd->supported |= SUPPORTED_FIBRE;
652                 cmd->advertising |= ADVERTISED_FIBRE;
653         } else if (trans_type == 0x80 || trans_type == 0) {
654                 cmd->port = PORT_TP;
655                 cmd->transceiver = XCVR_INTERNAL;
656                 cmd->supported |= SUPPORTED_TP;
657                 cmd->advertising |= ADVERTISED_TP;
658         } else  {
659                 cmd->port = -1;
660                 cmd->transceiver = -1;
661         }
662         return 0;
663 }
664
665 static const char *mlx4_en_duplex_to_string(int duplex)
666 {
667         switch (duplex) {
668         case DUPLEX_FULL:
669                 return "FULL";
670         case DUPLEX_HALF:
671                 return "HALF";
672         default:
673                 break;
674         }
675         return "UNKNOWN";
676 }
677
678 static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
679 {
680         struct mlx4_en_priv *priv = netdev_priv(dev);
681         struct mlx4_en_port_state *port_state = &priv->port_state;
682
683         if ((cmd->autoneg != port_state->autoneg) ||
684             (ethtool_cmd_speed(cmd) != port_state->link_speed) ||
685             (cmd->duplex != DUPLEX_FULL)) {
686                 en_info(priv, "Changing port state properties (auto-negotiation,"
687                               " speed/duplex) is not supported. Current:"
688                               " auto-negotiation=%d speed/duplex=%d/%s\n",
689                               port_state->autoneg, port_state->link_speed,
690                               mlx4_en_duplex_to_string(DUPLEX_FULL));
691                 return -EOPNOTSUPP;
692         }
693
694         /* User provided same port state properties that are currently set.
695          * Nothing to change
696          */
697         return 0;
698 }
699
700 static int mlx4_en_get_coalesce(struct net_device *dev,
701                               struct ethtool_coalesce *coal)
702 {
703         struct mlx4_en_priv *priv = netdev_priv(dev);
704
705         coal->tx_coalesce_usecs = priv->tx_usecs;
706         coal->tx_max_coalesced_frames = priv->tx_frames;
707         coal->rx_coalesce_usecs = priv->rx_usecs;
708         coal->rx_max_coalesced_frames = priv->rx_frames;
709
710         coal->pkt_rate_low = priv->pkt_rate_low;
711         coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
712         coal->pkt_rate_high = priv->pkt_rate_high;
713         coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
714         coal->rate_sample_interval = priv->sample_interval;
715         coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
716         return 0;
717 }
718
719 static int mlx4_en_set_coalesce(struct net_device *dev,
720                               struct ethtool_coalesce *coal)
721 {
722         struct mlx4_en_priv *priv = netdev_priv(dev);
723         int err, i;
724
725         priv->rx_frames = (coal->rx_max_coalesced_frames ==
726                            MLX4_EN_AUTO_CONF) ?
727                                 MLX4_EN_RX_COAL_TARGET /
728                                 priv->dev->mtu + 1 :
729                                 coal->rx_max_coalesced_frames;
730         priv->rx_usecs = (coal->rx_coalesce_usecs ==
731                           MLX4_EN_AUTO_CONF) ?
732                                 MLX4_EN_RX_COAL_TIME :
733                                 coal->rx_coalesce_usecs;
734
735         /* Setting TX coalescing parameters */
736         if (coal->tx_coalesce_usecs != priv->tx_usecs ||
737             coal->tx_max_coalesced_frames != priv->tx_frames) {
738                 priv->tx_usecs = coal->tx_coalesce_usecs;
739                 priv->tx_frames = coal->tx_max_coalesced_frames;
740                 if (priv->port_up) {
741                         for (i = 0; i < priv->tx_ring_num; i++) {
742                                 priv->tx_cq[i]->moder_cnt = priv->tx_frames;
743                                 priv->tx_cq[i]->moder_time = priv->tx_usecs;
744                                 if (mlx4_en_set_cq_moder(priv, priv->tx_cq[i]))
745                                         en_warn(priv, "Failed changing moderation for TX cq %d\n", i);
746                         }
747                 }
748         }
749
750         /* Set adaptive coalescing params */
751         priv->pkt_rate_low = coal->pkt_rate_low;
752         priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
753         priv->pkt_rate_high = coal->pkt_rate_high;
754         priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
755         priv->sample_interval = coal->rate_sample_interval;
756         priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
757         if (priv->adaptive_rx_coal)
758                 return 0;
759
760         if (priv->port_up) {
761                 for (i = 0; i < priv->rx_ring_num; i++) {
762                         priv->rx_cq[i]->moder_cnt = priv->rx_frames;
763                         priv->rx_cq[i]->moder_time = priv->rx_usecs;
764                         priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
765                         err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
766                         if (err)
767                                 return err;
768                 }
769         }
770
771         return 0;
772 }
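/*
 * Note on the auto-configuration path above: passing MLX4_EN_AUTO_CONF as
 * rx_max_coalesced_frames derives the frame budget from the MTU
 * (MLX4_EN_RX_COAL_TARGET / mtu + 1), and passing it as rx_coalesce_usecs
 * selects the MLX4_EN_RX_COAL_TIME default.  When adaptive RX coalescing is
 * enabled, the early return leaves the per-ring moderation to the adaptive
 * logic instead of programming the static rx_frames/rx_usecs values.
 */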
773
774 static int mlx4_en_set_pauseparam(struct net_device *dev,
775                                 struct ethtool_pauseparam *pause)
776 {
777         struct mlx4_en_priv *priv = netdev_priv(dev);
778         struct mlx4_en_dev *mdev = priv->mdev;
779         int err;
780
781         if (pause->autoneg)
782                 return -EOPNOTSUPP;
783
784         priv->prof->tx_pause = pause->tx_pause != 0;
785         priv->prof->rx_pause = pause->rx_pause != 0;
786         err = mlx4_SET_PORT_general(mdev->dev, priv->port,
787                                     priv->rx_skb_size + ETH_FCS_LEN,
788                                     priv->prof->tx_pause,
789                                     priv->prof->tx_ppp,
790                                     priv->prof->rx_pause,
791                                     priv->prof->rx_ppp);
792         if (err)
793                 en_err(priv, "Failed setting pause params\n");
794
795         return err;
796 }
797
798 static void mlx4_en_get_pauseparam(struct net_device *dev,
799                                  struct ethtool_pauseparam *pause)
800 {
801         struct mlx4_en_priv *priv = netdev_priv(dev);
802
803         pause->tx_pause = priv->prof->tx_pause;
804         pause->rx_pause = priv->prof->rx_pause;
805         pause->autoneg = mlx4_en_autoneg_get(dev);
806 }
807
808 /* rtnl lock must be taken before calling */
809 int mlx4_en_pre_config(struct mlx4_en_priv *priv)
810 {
811 #ifdef CONFIG_RFS_ACCEL
812         struct cpu_rmap *rmap;
813
814         if (!priv->dev->rx_cpu_rmap)
815                 return 0;
816
817         /* Disable RFS events
818          * Must have all RFS jobs flushed before freeing resources
819          */
820         rmap = priv->dev->rx_cpu_rmap;
821         priv->dev->rx_cpu_rmap = NULL;
822
823         rtnl_unlock();
824         free_irq_cpu_rmap(rmap);
825         rtnl_lock();
826
827         if (priv->dev->rx_cpu_rmap)
828                 return -EBUSY; /* another configuration completed while lock
829                                 * was free
830                                 */
831
832         /* Make sure all currently running filter_work are being processed
833          * Other work will return immediately because of disable_rfs
834          */
835         flush_workqueue(priv->mdev->workqueue);
836
837 #endif
838
839         return 0;
840 }
841
842 static int mlx4_en_set_ringparam(struct net_device *dev,
843                                  struct ethtool_ringparam *param)
844 {
845         struct mlx4_en_priv *priv = netdev_priv(dev);
846         struct mlx4_en_dev *mdev = priv->mdev;
847         u32 rx_size, tx_size;
848         int port_up = 0;
849         int err = 0;
850         int i, n_stats;
851         u64 *data = NULL;
852
853         if (!priv->port_up)
854                 return -ENOMEM;
855
856         if (param->rx_jumbo_pending || param->rx_mini_pending)
857                 return -EINVAL;
858
859         rx_size = roundup_pow_of_two(param->rx_pending);
860         rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
861         rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
862         tx_size = roundup_pow_of_two(param->tx_pending);
863         tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
864         tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
865
866         if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
867                                         priv->rx_ring[0]->size) &&
868             tx_size == priv->tx_ring[0]->size)
869                 return 0;
870         err = mlx4_en_pre_config(priv);
871         if (err)
872                 return err;
873
874         mutex_lock(&mdev->state_lock);
875         if (priv->port_up) {
876                 port_up = 1;
877                 mlx4_en_stop_port(dev);
878         }
879
880         /* Cache port statistics */
881         n_stats = mlx4_en_get_sset_count(dev, ETH_SS_STATS);
882         if (n_stats > 0) {
883                 data = kmalloc(n_stats * sizeof(u64), GFP_KERNEL);
884                 if (data)
885                         mlx4_en_get_ethtool_stats(dev, NULL, data);
886         }
887
888         mlx4_en_free_resources(priv);
889
890         priv->prof->tx_ring_size = tx_size;
891         priv->prof->rx_ring_size = rx_size;
892
893         err = mlx4_en_alloc_resources(priv);
894         if (err) {
895                 en_err(priv, "Failed reallocating port resources\n");
896                 goto out;
897         }
898
899         /* Restore port statistics */
900         if (n_stats > 0 && data)
901                 mlx4_en_restore_ethtool_stats(priv, data);
902
903         if (port_up) {
904                 err = mlx4_en_start_port(dev);
905                 if (err) {
906                         en_err(priv, "Failed starting port\n");
907                         goto out;
908                 }
909
910                 for (i = 0; i < priv->rx_ring_num; i++) {
911                         priv->rx_cq[i]->moder_cnt = priv->rx_frames;
912                         priv->rx_cq[i]->moder_time = priv->rx_usecs;
913                         priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
914                         err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
915                         if (err)
916                                 goto out;
917                 }
918         }
919
920 out:
921         kfree(data);
922         mutex_unlock(&mdev->state_lock);
923         return err;
924 }
925
926 static void mlx4_en_get_ringparam(struct net_device *dev,
927                                   struct ethtool_ringparam *param)
928 {
929         struct mlx4_en_priv *priv = netdev_priv(dev);
930
931         if (!priv->port_up)
932                 return;
933
934         memset(param, 0, sizeof(*param));
935         param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
936         param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
937         param->rx_pending = priv->port_up ?
938                 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
939         param->tx_pending = priv->tx_ring[0]->size;
940 }
941
942 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
943 {
944         struct mlx4_en_priv *priv = netdev_priv(dev);
945
946         return priv->rx_ring_num;
947 }
948
949 static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
950 {
951         struct mlx4_en_priv *priv = netdev_priv(dev);
952         struct mlx4_en_rss_map *rss_map = &priv->rss_map;
953         int rss_rings;
954         size_t n = priv->rx_ring_num;
955         int err = 0;
956
957         rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
958         rss_rings = 1 << ilog2(rss_rings);
959
960         while (n--) {
961                 ring_index[n] = rss_map->qps[n % rss_rings].qpn -
962                         rss_map->base_qpn;
963         }
964
965         return err;
966 }
967
968 static int mlx4_en_set_rxfh_indir(struct net_device *dev,
969                 const u32 *ring_index)
970 {
971         struct mlx4_en_priv *priv = netdev_priv(dev);
972         struct mlx4_en_dev *mdev = priv->mdev;
973         int port_up = 0;
974         int err = 0;
975         int i;
976         int rss_rings = 0;
977
978         /* Calculate RSS table size and make sure flows are spread evenly
979          * between rings
980          */
981         for (i = 0; i < priv->rx_ring_num; i++) {
982                 if (i > 0 && !ring_index[i] && !rss_rings)
983                         rss_rings = i;
984
985                 if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
986                         return -EINVAL;
987         }
988
989         if (!rss_rings)
990                 rss_rings = priv->rx_ring_num;
991
992         /* RSS table size must be an order of 2 */
993         if (!is_power_of_2(rss_rings))
994                 return -EINVAL;
995
996         mutex_lock(&mdev->state_lock);
997         if (priv->port_up) {
998                 port_up = 1;
999                 mlx4_en_stop_port(dev);
1000         }
1001
1002         priv->prof->rss_rings = rss_rings;
1003
1004         if (port_up) {
1005                 err = mlx4_en_start_port(dev);
1006                 if (err)
1007                         en_err(priv, "Failed starting port\n");
1008         }
1009
1010         mutex_unlock(&mdev->state_lock);
1011         return err;
1012 }
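/*
 * The validation loop above only accepts indirection tables of the form
 * ring_index[i] == i % rss_rings with rss_rings a power of two.  For example,
 * with 8 RX rings a table of 0 1 2 3 0 1 2 3 selects rss_rings = 4, while
 * 0 1 2 0 1 2 0 1 is rejected with -EINVAL because 3 is not a power of two.
 */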
1013
1014 #define all_zeros_or_all_ones(field)            \
1015         ((field) == 0 || (field) == (__force typeof(field))-1)
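/*
 * Only exact-match or wildcard masks are accepted below: for a 16-bit port
 * mask, 0x0000 (field ignored) and 0xffff (exact match) pass
 * all_zeros_or_all_ones(), while a partial mask such as 0x00ff makes the
 * validation fail with -EINVAL.
 */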
1016
1017 static int mlx4_en_validate_flow(struct net_device *dev,
1018                                  struct mlx4_ethtool_rxnfc *cmd)
1019 {
1020         struct ethtool_usrip4_spec *l3_mask;
1021         struct ethtool_tcpip4_spec *l4_mask;
1022         struct ethhdr *eth_mask;
1023
1024         if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1025                 return -EINVAL;
1026
1027         if (cmd->fs.flow_type & FLOW_MAC_EXT) {
1028                 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
1029                 if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
1030                         return -EINVAL;
1031         }
1032
1033         switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1034         case TCP_V4_FLOW:
1035         case UDP_V4_FLOW:
1036                 if (cmd->fs.m_u.tcp_ip4_spec.tos)
1037                         return -EINVAL;
1038                 l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
1039                 /* don't allow mask which isn't all 0 or 1 */
1040                 if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
1041                     !all_zeros_or_all_ones(l4_mask->ip4dst) ||
1042                     !all_zeros_or_all_ones(l4_mask->psrc) ||
1043                     !all_zeros_or_all_ones(l4_mask->pdst))
1044                         return -EINVAL;
1045                 break;
1046         case IP_USER_FLOW:
1047                 l3_mask = &cmd->fs.m_u.usr_ip4_spec;
1048                 if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
1049                     cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
1050                     (!l3_mask->ip4src && !l3_mask->ip4dst) ||
1051                     !all_zeros_or_all_ones(l3_mask->ip4src) ||
1052                     !all_zeros_or_all_ones(l3_mask->ip4dst))
1053                         return -EINVAL;
1054                 break;
1055         case ETHER_FLOW:
1056                 eth_mask = &cmd->fs.m_u.ether_spec;
1057                 /* source mac mask must not be set */
1058                 if (!is_zero_ether_addr(eth_mask->h_source))
1059                         return -EINVAL;
1060
1061                 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
1062                 if (!is_broadcast_ether_addr(eth_mask->h_dest))
1063                         return -EINVAL;
1064
1065                 if (!all_zeros_or_all_ones(eth_mask->h_proto))
1066                         return -EINVAL;
1067                 break;
1068         default:
1069                 return -EINVAL;
1070         }
1071
1072         if ((cmd->fs.flow_type & FLOW_EXT)) {
1073                 if (cmd->fs.m_ext.vlan_etype ||
1074                     !(cmd->fs.m_ext.vlan_tci == 0 ||
1075                       cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
1076                         return -EINVAL;
1077                 if (cmd->fs.m_ext.vlan_tci) {
1078                         if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) <
1079                             VLAN_MIN_VALUE ||
1080                             be16_to_cpu(cmd->fs.h_ext.vlan_tci) >
1081                             VLAN_MAX_VALUE)
1082                                 return -EINVAL;
1083                 }
1084         }
1085
1086         return 0;
1087 }
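/*
 * A rule that passes this validation, written in Linux ethtool(8) syntax
 * (the resulting ETHTOOL_SRXCLSRLINS request is what reaches
 * mlx4_en_set_rxnfc() below), would look roughly like:
 *
 *	ethtool -U <iface> flow-type tcp4 dst-ip 192.0.2.1 dst-port 80 \
 *		action 2 loc 1
 *
 * i.e. all-ones masks on the matched fields, a location below
 * MAX_NUM_OF_FS_RULES and an action naming an existing RX ring.
 */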
1088
1089 static int mlx4_en_ethtool_add_mac_rule(struct mlx4_ethtool_rxnfc *cmd,
1090                                         struct list_head *rule_list_h,
1091                                         struct mlx4_spec_list *spec_l2,
1092                                         unsigned char *mac)
1093 {
1094         int err = 0;
1095         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
1096
1097         spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
1098         memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
1099         memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
1100
1101         if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
1102                 spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
1103                 spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
1104         }
1105
1106         list_add_tail(&spec_l2->list, rule_list_h);
1107
1108         return err;
1109 }
1110
1111 static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
1112                                                 struct mlx4_ethtool_rxnfc *cmd,
1113                                                 struct list_head *rule_list_h,
1114                                                 struct mlx4_spec_list *spec_l2,
1115                                                 __be32 ipv4_dst)
1116 {
1117         unsigned char mac[ETH_ALEN];
1118
1119         if (!ipv4_is_multicast(ipv4_dst)) {
1120                 if (cmd->fs.flow_type & FLOW_MAC_EXT)
1121                         memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
1122                 else
1123                         memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
1124         } else {
1125                 ip_eth_mc_map(ipv4_dst, mac);
1126         }
1127
1128         return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
1129 }
1130
1131 static int add_ip_rule(struct mlx4_en_priv *priv,
1132                                 struct mlx4_ethtool_rxnfc *cmd,
1133                                 struct list_head *list_h)
1134 {
1135         struct mlx4_spec_list *spec_l2 = NULL;
1136         struct mlx4_spec_list *spec_l3 = NULL;
1137         struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
1138
1139         spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
1140         spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
1141         if (!spec_l2 || !spec_l3) {
1142                 en_err(priv, "Failed to allocate ethtool rule.\n");
1143                 kfree(spec_l2);
1144                 kfree(spec_l3);
1145                 return -ENOMEM;
1146         }
1147
1148         mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
1149                                              cmd->fs.h_u.
1150                                              usr_ip4_spec.ip4dst);
1151         spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1152         spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
1153         if (l3_mask->ip4src)
1154                 spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1155         spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
1156         if (l3_mask->ip4dst)
1157                 spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1158         list_add_tail(&spec_l3->list, list_h);
1159
1160         return 0;
1161 }
1162
1163 static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
1164                              struct mlx4_ethtool_rxnfc *cmd,
1165                              struct list_head *list_h, int proto)
1166 {
1167         struct mlx4_spec_list *spec_l2 = NULL;
1168         struct mlx4_spec_list *spec_l3 = NULL;
1169         struct mlx4_spec_list *spec_l4 = NULL;
1170         struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
1171
1172         spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
1173         spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
1174         spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
1175         if (!spec_l2 || !spec_l3 || !spec_l4) {
1176                 en_err(priv, "Failed to allocate ethtool rule.\n");
1177                 kfree(spec_l2);
1178                 kfree(spec_l3);
1179                 kfree(spec_l4);
1180                 return -ENOMEM;
1181         }
1182
1183         spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1184
1185         if (proto == TCP_V4_FLOW) {
1186                 mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
1187                                                      spec_l2,
1188                                                      cmd->fs.h_u.
1189                                                      tcp_ip4_spec.ip4dst);
1190                 spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
1191                 spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
1192                 spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
1193                 spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
1194                 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
1195         } else {
1196                 mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
1197                                                      spec_l2,
1198                                                      cmd->fs.h_u.
1199                                                      udp_ip4_spec.ip4dst);
1200                 spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
1201                 spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
1202                 spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
1203                 spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
1204                 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
1205         }
1206
1207         if (l4_mask->ip4src)
1208                 spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1209         if (l4_mask->ip4dst)
1210                 spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1211
1212         if (l4_mask->psrc)
1213                 spec_l4->tcp_udp.src_port_msk = MLX4_BE_SHORT_MASK;
1214         if (l4_mask->pdst)
1215                 spec_l4->tcp_udp.dst_port_msk = MLX4_BE_SHORT_MASK;
1216
1217         list_add_tail(&spec_l3->list, list_h);
1218         list_add_tail(&spec_l4->list, list_h);
1219
1220         return 0;
1221 }
1222
1223 static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
1224                                              struct mlx4_ethtool_rxnfc *cmd,
1225                                              struct list_head *rule_list_h)
1226 {
1227         int err;
1228         struct ethhdr *eth_spec;
1229         struct mlx4_spec_list *spec_l2;
1230         struct mlx4_en_priv *priv = netdev_priv(dev);
1231
1232         err = mlx4_en_validate_flow(dev, cmd);
1233         if (err)
1234                 return err;
1235
1236         switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1237         case ETHER_FLOW:
1238                 spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
1239                 if (!spec_l2)
1240                         return -ENOMEM;
1241
1242                 eth_spec = &cmd->fs.h_u.ether_spec;
1243                 mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &eth_spec->h_dest[0]);
1244                 spec_l2->eth.ether_type = eth_spec->h_proto;
1245                 if (eth_spec->h_proto)
1246                         spec_l2->eth.ether_type_enable = 1;
1247                 break;
1248         case IP_USER_FLOW:
1249                 err = add_ip_rule(priv, cmd, rule_list_h);
1250                 break;
1251         case TCP_V4_FLOW:
1252                 err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
1253                 break;
1254         case UDP_V4_FLOW:
1255                 err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
1256                 break;
1257         }
1258
1259         return err;
1260 }
1261
1262 static int mlx4_en_flow_replace(struct net_device *dev,
1263                                 struct mlx4_ethtool_rxnfc *cmd)
1264 {
1265         int err;
1266         struct mlx4_en_priv *priv = netdev_priv(dev);
1267         struct mlx4_en_dev *mdev = priv->mdev;
1268         struct ethtool_flow_id *loc_rule;
1269         struct mlx4_spec_list *spec, *tmp_spec;
1270         u32 qpn;
1271         u64 reg_id;
1272
1273         struct mlx4_net_trans_rule rule = {
1274                 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1275                 .exclusive = 0,
1276                 .allow_loopback = 1,
1277                 .promisc_mode = MLX4_FS_REGULAR,
1278         };
1279
1280         rule.port = priv->port;
1281         rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
1282         INIT_LIST_HEAD(&rule.list);
1283
1284         /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
1285         if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
1286                 qpn = priv->drop_qp.qpn;
1287         else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
1288                 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
1289         } else {
1290                 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
1291                         en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
1292                                 cmd->fs.ring_cookie);
1293                         return -EINVAL;
1294                 }
1295                 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
1296                 if (!qpn) {
1297                         en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
1298                                 cmd->fs.ring_cookie);
1299                         return -EINVAL;
1300                 }
1301         }
1302         rule.qpn = qpn;
1303         err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
1304         if (err)
1305                 goto out_free_list;
1306
1307         mutex_lock(&mdev->state_lock);
1308         loc_rule = &priv->ethtool_rules[cmd->fs.location];
1309         if (loc_rule->id) {
1310                 err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
1311                 if (err) {
1312                         en_err(priv, "Failed to detach network rule at location %d. registration id = %llx\n",
1313                                cmd->fs.location, loc_rule->id);
1314                         goto unlock;
1315                 }
1316                 loc_rule->id = 0;
1317                 memset(&loc_rule->flow_spec, 0,
1318                        sizeof(struct ethtool_rx_flow_spec));
1319                 list_del(&loc_rule->list);
1320         }
1321         err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
1322         if (err) {
1323                 en_err(priv, "Failed to attach network rule at location %d.\n",
1324                        cmd->fs.location);
1325                 goto unlock;
1326         }
1327         loc_rule->id = reg_id;
1328         memcpy(&loc_rule->flow_spec, &cmd->fs,
1329                sizeof(struct ethtool_rx_flow_spec));
1330         list_add_tail(&loc_rule->list, &priv->ethtool_list);
1331
1332 unlock:
1333         mutex_unlock(&mdev->state_lock);
1334 out_free_list:
1335         list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
1336                 list_del(&spec->list);
1337                 kfree(spec);
1338         }
1339         return err;
1340 }
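/*
 * ring_cookie encoding handled above: RX_CLS_FLOW_DISC steers matching
 * traffic to the drop QP; if bit 63 (EN_ETHTOOL_QP_ATTACH) is set, the low
 * bits carry a raw QP number (e.g. EN_ETHTOOL_QP_ATTACH | 0x48 attaches the
 * flow directly to QP 0x48); otherwise the cookie is taken as an RX ring
 * index and resolved through rss_map.qps[].
 */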
1341
1342 static int mlx4_en_flow_detach(struct net_device *dev,
1343                                struct mlx4_ethtool_rxnfc *cmd)
1344 {
1345         int err = 0;
1346         struct ethtool_flow_id *rule;
1347         struct mlx4_en_priv *priv = netdev_priv(dev);
1348         struct mlx4_en_dev *mdev = priv->mdev;
1349
1350         if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1351                 return -EINVAL;
1352
1353         mutex_lock(&mdev->state_lock);
1354         rule = &priv->ethtool_rules[cmd->fs.location];
1355         if (!rule->id) {
1356                 err = -ENOENT;
1357                 goto out;
1358         }
1359
1360         err = mlx4_flow_detach(priv->mdev->dev, rule->id);
1361         if (err) {
1362                 en_err(priv, "Failed to detach network rule at location %d. registration id = 0x%llx\n",
1363                        cmd->fs.location, rule->id);
1364                 goto out;
1365         }
1366         rule->id = 0;
1367         memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
1368
1369         list_del(&rule->list);
1370 out:
1371         mutex_unlock(&mdev->state_lock);
1372         return err;
1373
1374 }
1375
1376 static int mlx4_en_get_flow(struct net_device *dev, struct mlx4_ethtool_rxnfc *cmd,
1377                             int loc)
1378 {
1379         int err = 0;
1380         struct ethtool_flow_id *rule;
1381         struct mlx4_en_priv *priv = netdev_priv(dev);
1382
1383         if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
1384                 return -EINVAL;
1385
1386         rule = &priv->ethtool_rules[loc];
1387         if (rule->id)
1388                 memcpy(&cmd->fs, &rule->flow_spec,
1389                        sizeof(struct ethtool_rx_flow_spec));
1390         else
1391                 err = -ENOENT;
1392
1393         return err;
1394 }
1395
1396 static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
1397 {
1398
1399         int i, res = 0;
1400         for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
1401                 if (priv->ethtool_rules[i].id)
1402                         res++;
1403         }
1404         return res;
1405
1406 }
1407
1408 static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *c,
1409                              u32 *rule_locs)
1410 {
1411         struct mlx4_en_priv *priv = netdev_priv(dev);
1412         struct mlx4_en_dev *mdev = priv->mdev;
1413         int err = 0;
1414         int i = 0, priority = 0;
1415         struct mlx4_ethtool_rxnfc *cmd = (struct mlx4_ethtool_rxnfc *)c;
1416
1417         if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
1418              cmd->cmd == ETHTOOL_GRXCLSRULE ||
1419              cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
1420             (mdev->dev->caps.steering_mode !=
1421              MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
1422                 return -EINVAL;
1423
1424         switch (cmd->cmd) {
1425         case ETHTOOL_GRXRINGS:
1426                 cmd->data = priv->rx_ring_num;
1427                 break;
1428         case ETHTOOL_GRXCLSRLCNT:
1429                 cmd->rule_cnt = mlx4_en_get_num_flows(priv);
1430                 break;
1431         case ETHTOOL_GRXCLSRULE:
1432                 err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
1433                 break;
1434         case ETHTOOL_GRXCLSRLALL:
1435                 while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
1436                         err = mlx4_en_get_flow(dev, cmd, i);
1437                         if (!err)
1438                                 rule_locs[priority++] = i;
1439                         i++;
1440                 }
1441                 err = 0;
1442                 break;
1443         default:
1444                 err = -EOPNOTSUPP;
1445                 break;
1446         }
1447
1448         return err;
1449 }
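/*
 * Editor's note (illustrative only): the read side.  Querying the RX ring
 * count served by the ETHTOOL_GRXRINGS case above could look roughly like
 * the fragment below, reusing the hypothetical fd/ifr setup from the earlier
 * sketches (printf additionally assumes <stdio.h>); not compiled.
 */
#if 0
        struct ethtool_rxnfc nfc;

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_GRXRINGS;
        ifr.ifr_data = (void *)&nfc;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("RX rings: %llu\n", (unsigned long long)nfc.data);
#endif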
1450
1451 static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *c)
1452 {
1453         int err = 0;
1454         struct mlx4_en_priv *priv = netdev_priv(dev);
1455         struct mlx4_en_dev *mdev = priv->mdev;
1456         struct mlx4_ethtool_rxnfc *cmd = (struct mlx4_ethtool_rxnfc *)c;
1457
1458         if (mdev->dev->caps.steering_mode !=
1459             MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
1460                 return -EINVAL;
1461
1462         switch (cmd->cmd) {
1463         case ETHTOOL_SRXCLSRLINS:
1464                 err = mlx4_en_flow_replace(dev, cmd);
1465                 break;
1466         case ETHTOOL_SRXCLSRLDEL:
1467                 err = mlx4_en_flow_detach(dev, cmd);
1468                 break;
1469         default:
1470                 en_warn(priv, "Unsupported ethtool command (%d)\n", cmd->cmd);
1471                 return -EINVAL;
1472         }
1473
1474         return err;
1475 }
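/*
 * Editor's note: in terms of the standard Linux ethtool utility, the two
 * branches dispatched above correspond roughly to
 *
 *     ethtool -U mlxen0 flow-type udp4 dst-port 4791 action 3 loc 7
 *     ethtool -U mlxen0 delete 7
 *
 * (the interface name and values are illustrative, and exact option syntax
 * may vary between ethtool versions and platforms).
 */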
1476
1477 static void mlx4_en_get_channels(struct net_device *dev,
1478                                  struct ethtool_channels *channel)
1479 {
1480         struct mlx4_en_priv *priv = netdev_priv(dev);
1481
1482         memset(channel, 0, sizeof(*channel));
1483
1484         channel->max_rx = MAX_RX_RINGS;
1485         channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
1486
1487         channel->rx_count = priv->rx_ring_num;
1488         channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
1489 }
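/*
 * Editor's note: this is the read-only view reported by "ethtool -l" on
 * Linux-style stacks.  Note that tx_count is expressed per user priority:
 * the driver keeps MLX4_EN_NUM_UP groups of TX rings, so the value shown
 * is tx_ring_num / MLX4_EN_NUM_UP rather than the raw ring total.
 */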
1490
1491 static int mlx4_en_set_channels(struct net_device *dev,
1492                                 struct ethtool_channels *channel)
1493 {
1494         struct mlx4_en_priv *priv = netdev_priv(dev);
1495         struct mlx4_en_dev *mdev = priv->mdev;
1496         int port_up = 0;
1497         int i;
1498         int err = 0;
1499
1500         if (channel->other_count || channel->combined_count ||
1501             channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
1502             channel->rx_count > MAX_RX_RINGS ||
1503             !channel->tx_count || !channel->rx_count)
1504                 return -EINVAL;
1505
1506         err = mlx4_en_pre_config(priv);
1507         if (err)
1508                 return err;
1509
1510         mutex_lock(&mdev->state_lock);
1511         if (priv->port_up) {
1512                 port_up = 1;
1513                 mlx4_en_stop_port(dev);
1514         }
1515
1516         mlx4_en_free_resources(priv);
1517
1518         priv->num_tx_rings_p_up = channel->tx_count;
1519         priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
1520         priv->rx_ring_num = channel->rx_count;
1521
1522         err = mlx4_en_alloc_resources(priv);
1523         if (err) {
1524                 en_err(priv, "Failed reallocating port resources\n");
1525                 goto out;
1526         }
1527
1528         netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1529         netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1530
1531         mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
1532
1533         en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
1534         en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
1535
1536         if (port_up) {
1537                 err = mlx4_en_start_port(dev);
1538                 if (err)
1539                         en_err(priv, "Failed starting port\n");
1540
1541                 for (i = 0; i < priv->rx_ring_num; i++) {
1542                         priv->rx_cq[i]->moder_cnt = priv->rx_frames;
1543                         priv->rx_cq[i]->moder_time = priv->rx_usecs;
1544                         priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1545                         err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
1546                         if (err)
1547                                 goto out;
1548                 }
1549         }
1550
1551 out:
1552         mutex_unlock(&mdev->state_lock);
1553         return err;
1554 }
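/*
 * Editor's note: changing the channel counts is disruptive by design -- the
 * function above stops the port, frees and reallocates every ring, then
 * restarts the port and reapplies the RX coalescing settings.  Through the
 * Linux ethtool utility this would be driven by something like
 *
 *     ethtool -L mlxen0 rx 8 tx 4
 *
 * subject to the bounds checked above (rx <= MAX_RX_RINGS,
 * tx <= MLX4_EN_MAX_TX_RING_P_UP, neither zero, and no "other"/"combined"
 * channels); the interface name is illustrative.
 */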
1555
1556 static int mlx4_en_get_ts_info(struct net_device *dev,
1557                                struct ethtool_ts_info *info)
1558 {
1559         struct mlx4_en_priv *priv = netdev_priv(dev);
1560         struct mlx4_en_dev *mdev = priv->mdev;
1561         int ret;
1562
1563         ret = ethtool_op_get_ts_info(dev, info);
1564         if (ret)
1565                 return ret;
1566
1567         if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
1568                 info->so_timestamping |=
1569                         SOF_TIMESTAMPING_TX_HARDWARE |
1570                         SOF_TIMESTAMPING_RX_HARDWARE |
1571                         SOF_TIMESTAMPING_RAW_HARDWARE;
1572
1573                 info->tx_types =
1574                         (1 << HWTSTAMP_TX_OFF) |
1575                         (1 << HWTSTAMP_TX_ON);
1576
1577                 info->rx_filters =
1578                         (1 << HWTSTAMP_FILTER_NONE) |
1579                         (1 << HWTSTAMP_FILTER_ALL);
1580         }
1581
1582         return ret;
1583 }
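/*
 * Editor's note: this is what "ethtool -T" reads on Linux-style stacks.
 * Hardware timestamping modes are only advertised when the device reports
 * MLX4_DEV_CAP_FLAG2_TS; otherwise the caller gets just the software
 * defaults filled in by ethtool_op_get_ts_info().
 */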
1584
1585 const struct ethtool_ops mlx4_en_ethtool_ops = {
1586         .get_drvinfo = mlx4_en_get_drvinfo,
1587         .get_settings = mlx4_en_get_settings,
1588         .set_settings = mlx4_en_set_settings,
1589         .get_link = ethtool_op_get_link,
1590         .get_strings = mlx4_en_get_strings,
1591         .get_sset_count = mlx4_en_get_sset_count,
1592         .get_ethtool_stats = mlx4_en_get_ethtool_stats,
1593         .self_test = mlx4_en_self_test,
1594         .get_wol = mlx4_en_get_wol,
1595         .set_wol = mlx4_en_set_wol,
1596         .get_msglevel = mlx4_en_get_msglevel,
1597         .set_msglevel = mlx4_en_set_msglevel,
1598         .get_coalesce = mlx4_en_get_coalesce,
1599         .set_coalesce = mlx4_en_set_coalesce,
1600         .get_pauseparam = mlx4_en_get_pauseparam,
1601         .set_pauseparam = mlx4_en_set_pauseparam,
1602         .get_ringparam = mlx4_en_get_ringparam,
1603         .set_ringparam = mlx4_en_set_ringparam,
1604         .get_rxnfc = mlx4_en_get_rxnfc,
1605         .set_rxnfc = mlx4_en_set_rxnfc,
1606         .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
1607         .get_rxfh_indir = mlx4_en_get_rxfh_indir,
1608         .set_rxfh_indir = mlx4_en_set_rxfh_indir,
1609         .get_channels = mlx4_en_get_channels,
1610         .set_channels = mlx4_en_set_channels,
1611         .get_ts_info = mlx4_en_get_ts_info,
1612 };
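/*
 * Editor's note: this table only takes effect once the net_device is
 * pointed at it during port setup, which happens outside this file (in
 * en_netdev.c in the Linux driver).  A hypothetical hookup -- the exact
 * call site and macro are assumptions, not taken from this tree -- would
 * be along the lines of:
 */
#if 0
        SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
#endif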
1613
1614
1615
1616
1617