sys/ofed/drivers/net/mlx4/en_ethtool.c (FreeBSD releng/10.2)
1 /*
2  * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33
34 #include <linux/kernel.h>
35 #include <linux/netdevice.h>
36 #include <linux/mlx4/driver.h>
37 #include <linux/in.h>
38 #include <net/ip.h>
39 #include <linux/bitmap.h>
40
41 #include "mlx4_en.h"
42 #include "en_port.h"
43
44 #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
45
46 union mlx4_ethtool_flow_union {
47         struct ethtool_tcpip4_spec              tcp_ip4_spec;
48         struct ethtool_tcpip4_spec              udp_ip4_spec;
49         struct ethtool_tcpip4_spec              sctp_ip4_spec;
50         struct ethtool_ah_espip4_spec           ah_ip4_spec;
51         struct ethtool_ah_espip4_spec           esp_ip4_spec;
52         struct ethtool_usrip4_spec              usr_ip4_spec;
53         struct ethhdr                           ether_spec;
54         __u8                                    hdata[52];
55 };
56
57 struct mlx4_ethtool_flow_ext {
58         __u8            padding[2];
59         unsigned char   h_dest[ETH_ALEN];
60         __be16          vlan_etype;
61         __be16          vlan_tci;
62         __be32          data[2];
63 };
64
65 struct mlx4_ethtool_rx_flow_spec {
66         __u32           flow_type;
67         union mlx4_ethtool_flow_union h_u;
68         struct mlx4_ethtool_flow_ext h_ext;
69         union mlx4_ethtool_flow_union m_u;
70         struct mlx4_ethtool_flow_ext m_ext;
71         __u64           ring_cookie;
72         __u32           location;
73 };
74
75 struct mlx4_ethtool_rxnfc {
76         __u32                           cmd;
77         __u32                           flow_type;
78         __u64                           data;
79         struct mlx4_ethtool_rx_flow_spec        fs;
80         __u32                           rule_cnt;
81         __u32                           rule_locs[0];
82 };
83
84 #ifndef FLOW_MAC_EXT
85 #define FLOW_MAC_EXT    0x40000000
86 #endif
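/*
 * Illustrative sketch (editorial addition, not driver code): how an ethtool
 * ETHTOOL_SRXCLSRLINS request maps onto the structures above.  A TCP/IPv4
 * rule that steers packets with destination port 80 to RX ring 1 would be
 * filled in roughly as follows (values are arbitrary examples):
 *
 *      struct mlx4_ethtool_rxnfc cmd = {
 *              .cmd = ETHTOOL_SRXCLSRLINS,
 *              .fs = {
 *                      .flow_type   = TCP_V4_FLOW,
 *                      .ring_cookie = 1,       // target RX ring
 *                      .location    = 0,       // rule slot
 *              },
 *      };
 *      cmd.fs.h_u.tcp_ip4_spec.pdst = htons(80);       // match dst port 80
 *      cmd.fs.m_u.tcp_ip4_spec.pdst = 0xffff;          // exact-match mask
 *
 * Field masks must be all zeros or all ones; see mlx4_en_validate_flow()
 * below.
 */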
87
88 static void
89 mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
90 {
91         struct mlx4_en_priv *priv = netdev_priv(dev);
92         struct mlx4_en_dev *mdev = priv->mdev;
93
94         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
95         strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
96                 sizeof(drvinfo->version));
97         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
98                 "%d.%d.%d",
99                 (u16) (mdev->dev->caps.fw_ver >> 32),
100                 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
101                 (u16) (mdev->dev->caps.fw_ver & 0xffff));
102         strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
103                 sizeof(drvinfo->bus_info));
104         drvinfo->n_stats = 0;
105         drvinfo->regdump_len = 0;
106         drvinfo->eedump_len = 0;
107 }
108
109 static const char main_strings[][ETH_GSTRING_LEN] = {
110         /* packet statistics */
111         "rx_packets",
112         "rx_bytes",
113         "rx_multicast_packets",
114         "rx_broadcast_packets",
115         "rx_errors",
116         "rx_dropped",
117         "rx_length_errors",
118         "rx_over_errors",
119         "rx_crc_errors",
120         "rx_jabbers",
121         "rx_in_range_length_error",
122         "rx_out_range_length_error",
123         "rx_lt_64_bytes_packets",
124         "rx_127_bytes_packets",
125         "rx_255_bytes_packets",
126         "rx_511_bytes_packets",
127         "rx_1023_bytes_packets",
128         "rx_1518_bytes_packets",
129         "rx_1522_bytes_packets",
130         "rx_1548_bytes_packets",
131         "rx_gt_1548_bytes_packets",
132         "tx_packets",
133         "tx_bytes",
134         "tx_multicast_packets",
135         "tx_broadcast_packets",
136         "tx_errors",
137         "tx_dropped",
138         "tx_lt_64_bytes_packets",
139         "tx_127_bytes_packets",
140         "tx_255_bytes_packets",
141         "tx_511_bytes_packets",
142         "tx_1023_bytes_packets",
143         "tx_1518_bytes_packets",
144         "tx_1522_bytes_packets",
145         "tx_1548_bytes_packets",
146         "tx_gt_1548_bytes_packets",
147         "rx_prio_0_packets", "rx_prio_0_bytes",
148         "rx_prio_1_packets", "rx_prio_1_bytes",
149         "rx_prio_2_packets", "rx_prio_2_bytes",
150         "rx_prio_3_packets", "rx_prio_3_bytes",
151         "rx_prio_4_packets", "rx_prio_4_bytes",
152         "rx_prio_5_packets", "rx_prio_5_bytes",
153         "rx_prio_6_packets", "rx_prio_6_bytes",
154         "rx_prio_7_packets", "rx_prio_7_bytes",
155         "rx_novlan_packets", "rx_novlan_bytes",
156         "tx_prio_0_packets", "tx_prio_0_bytes",
157         "tx_prio_1_packets", "tx_prio_1_bytes",
158         "tx_prio_2_packets", "tx_prio_2_bytes",
159         "tx_prio_3_packets", "tx_prio_3_bytes",
160         "tx_prio_4_packets", "tx_prio_4_bytes",
161         "tx_prio_5_packets", "tx_prio_5_bytes",
162         "tx_prio_6_packets", "tx_prio_6_bytes",
163         "tx_prio_7_packets", "tx_prio_7_bytes",
164         "tx_novlan_packets", "tx_novlan_bytes",
165
166         /* flow control statistics */
167         "rx_pause_prio_0", "rx_pause_duration_prio_0",
168         "rx_pause_transition_prio_0", "tx_pause_prio_0",
169         "tx_pause_duration_prio_0", "tx_pause_transition_prio_0",
170         "rx_pause_prio_1", "rx_pause_duration_prio_1",
171         "rx_pause_transition_prio_1", "tx_pause_prio_1",
172         "tx_pause_duration_prio_1", "tx_pause_transition_prio_1",
173         "rx_pause_prio_2", "rx_pause_duration_prio_2",
174         "rx_pause_transition_prio_2", "tx_pause_prio_2",
175         "tx_pause_duration_prio_2", "tx_pause_transition_prio_2",
176         "rx_pause_prio_3", "rx_pause_duration_prio_3",
177         "rx_pause_transition_prio_3", "tx_pause_prio_3",
178         "tx_pause_duration_prio_3", "tx_pause_transition_prio_3",
179         "rx_pause_prio_4", "rx_pause_duration_prio_4",
180         "rx_pause_transition_prio_4", "tx_pause_prio_4",
181         "tx_pause_duration_prio_4", "tx_pause_transition_prio_4",
182         "rx_pause_prio_5", "rx_pause_duration_prio_5",
183         "rx_pause_transition_prio_5", "tx_pause_prio_5",
184         "tx_pause_duration_prio_5", "tx_pause_transition_prio_5",
185         "rx_pause_prio_6", "rx_pause_duration_prio_6",
186         "rx_pause_transition_prio_6", "tx_pause_prio_6",
187         "tx_pause_duration_prio_6", "tx_pause_transition_prio_6",
188         "rx_pause_prio_7", "rx_pause_duration_prio_7",
189         "rx_pause_transition_prio_7", "tx_pause_prio_7",
190         "tx_pause_duration_prio_7", "tx_pause_transition_prio_7",
191
192         /* VF statistics */
193         "rx_packets",
194         "rx_bytes",
195         "rx_multicast_packets",
196         "rx_broadcast_packets",
197         "rx_errors",
198         "rx_dropped",
199         "tx_packets",
200         "tx_bytes",
201         "tx_multicast_packets",
202         "tx_broadcast_packets",
203         "tx_errors",
204
205         /* VPort statistics */
206         "vport_rx_unicast_packets",
207         "vport_rx_unicast_bytes",
208         "vport_rx_multicast_packets",
209         "vport_rx_multicast_bytes",
210         "vport_rx_broadcast_packets",
211         "vport_rx_broadcast_bytes",
212         "vport_rx_dropped",
213         "vport_rx_errors",
214         "vport_tx_unicast_packets",
215         "vport_tx_unicast_bytes",
216         "vport_tx_multicast_packets",
217         "vport_tx_multicast_bytes",
218         "vport_tx_broadcast_packets",
219         "vport_tx_broadcast_bytes",
220         "vport_tx_errors",
221
222         /* port statistics */
223         "tx_tso_packets",
224         "tx_queue_stopped", "tx_wake_queue", "tx_timeout", "rx_alloc_failed",
225         "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
226 };
227
228 static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
229         "Interrupt Test",
230         "Link Test",
231         "Speed Test",
232         "Register Test",
233         "Loopback Test",
234 };
235
236 static u32 mlx4_en_get_msglevel(struct net_device *dev)
237 {
238         return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
239 }
240
241 static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
242 {
243         ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
244 }
245
246 static void mlx4_en_get_wol(struct net_device *netdev,
247                             struct ethtool_wolinfo *wol)
248 {
249         struct mlx4_en_priv *priv = netdev_priv(netdev);
250         int err = 0;
251         u64 config = 0;
252         u64 mask;
253
254         if ((priv->port < 1) || (priv->port > 2)) {
255                 en_err(priv, "Failed to get WoL information\n");
256                 return;
257         }
258
259         mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
260                 MLX4_DEV_CAP_FLAG_WOL_PORT2;
261
262         if (!(priv->mdev->dev->caps.flags & mask)) {
263                 wol->supported = 0;
264                 wol->wolopts = 0;
265                 return;
266         }
267
268         err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
269         if (err) {
270                 en_err(priv, "Failed to get WoL information\n");
271                 return;
272         }
273
274         if (config & MLX4_EN_WOL_MAGIC)
275                 wol->supported = WAKE_MAGIC;
276         else
277                 wol->supported = 0;
278
279         if (config & MLX4_EN_WOL_ENABLED)
280                 wol->wolopts = WAKE_MAGIC;
281         else
282                 wol->wolopts = 0;
283 }
284
285 static int mlx4_en_set_wol(struct net_device *netdev,
286                             struct ethtool_wolinfo *wol)
287 {
288         struct mlx4_en_priv *priv = netdev_priv(netdev);
289         u64 config = 0;
290         int err = 0;
291         u64 mask;
292
293         if ((priv->port < 1) || (priv->port > 2))
294                 return -EOPNOTSUPP;
295
296         mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
297                 MLX4_DEV_CAP_FLAG_WOL_PORT2;
298
299         if (!(priv->mdev->dev->caps.flags & mask))
300                 return -EOPNOTSUPP;
301
302         if (wol->supported & ~WAKE_MAGIC)
303                 return -EINVAL;
304
305         err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
306         if (err) {
307                 en_err(priv, "Failed to get WoL info, unable to modify\n");
308                 return err;
309         }
310
311         if (wol->wolopts & WAKE_MAGIC) {
312                 config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
313                                 MLX4_EN_WOL_MAGIC;
314         } else {
315                 config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
316                 config |= MLX4_EN_WOL_DO_MODIFY;
317         }
318
319         err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
320         if (err)
321                 en_err(priv, "Failed to set WoL information\n");
322
323         return err;
324 }
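/*
 * Informal summary (editorial note, derived from the code above): enabling
 * magic-packet wake sets MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC in the
 * firmware WoL word, disabling clears both bits, and MLX4_EN_WOL_DO_MODIFY
 * is OR-ed in either way so that mlx4_wol_write() applies the change.
 */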
325
326 struct bitmap_sim_iterator {
327         bool advance_array;
328         unsigned long *stats_bitmap;
329         unsigned int count;
330         unsigned int j;
331 };
332
333 static inline void bitmap_sim_iterator_init(struct bitmap_sim_iterator *h,
334                                             unsigned long *stats_bitmap,
335                                             int count)
336 {
337         h->j = 0;
338         h->advance_array = !bitmap_empty(stats_bitmap, count);
339         h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
340                 : count;
341         h->stats_bitmap = stats_bitmap;
342 }
343
344 static inline int bitmap_sim_iterator_test(struct bitmap_sim_iterator *h)
345 {
346         return !h->advance_array ? 1 : test_bit(h->j, h->stats_bitmap);
347 }
348
349 static inline int bitmap_sim_iterator_inc(struct bitmap_sim_iterator *h)
350 {
351         return h->j++;
352 }
353
354 static inline unsigned int bitmap_sim_iterator_count(
355                 struct bitmap_sim_iterator *h)
356 {
357         return h->count;
358 }
359
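/*
 * Usage sketch (editorial addition): the iterator above lets the stats code
 * walk either every counter or only those selected in priv->stats_bitmap,
 * e.g.:
 *
 *      struct bitmap_sim_iterator it;
 *      int i;
 *
 *      bitmap_sim_iterator_init(&it, priv->stats_bitmap, NUM_ALL_STATS);
 *      for (i = 0; i < NUM_PKT_STATS; i++, bitmap_sim_iterator_inc(&it))
 *              if (bitmap_sim_iterator_test(&it))
 *                      ;       // counter i is exported
 *
 * With an empty bitmap every counter is reported; otherwise only the set
 * bits are, and bitmap_sim_iterator_count() returns how many that is.
 */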
360 int mlx4_en_get_sset_count(struct net_device *dev, int sset)
361 {
362         struct mlx4_en_priv *priv = netdev_priv(dev);
363         struct bitmap_sim_iterator it;
364
365         int num_of_stats = NUM_ALL_STATS -
366                 ((priv->mdev->dev->caps.flags2 &
367                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
368
369         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
370
371         switch (sset) {
372         case ETH_SS_STATS:
373                 return bitmap_sim_iterator_count(&it) +
374                         (priv->tx_ring_num * 2) +
375 #ifdef LL_EXTENDED_STATS
376                         (priv->rx_ring_num * 5);
377 #else
378                         (priv->rx_ring_num * 2);
379 #endif
380         case ETH_SS_TEST:
381                 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
382                                         & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
383         default:
384                 return -EOPNOTSUPP;
385         }
386 }
387
388 void mlx4_en_get_ethtool_stats(struct net_device *dev,
389                 struct ethtool_stats *stats, u64 *data)
390 {
391         struct mlx4_en_priv *priv = netdev_priv(dev);
392         int index = 0;
393         int i;
394         struct bitmap_sim_iterator it;
395
396         int num_of_stats = NUM_ALL_STATS -
397                 ((priv->mdev->dev->caps.flags2 &
398                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
399
400         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
401
402         if (!data || !priv->port_up)
403                 return;
404
405         spin_lock_bh(&priv->stats_lock);
406
407         for (i = 0; i < NUM_PKT_STATS; i++,
408                         bitmap_sim_iterator_inc(&it))
409                 if (bitmap_sim_iterator_test(&it))
410                         data[index++] =
411                                 ((unsigned long *)&priv->pkstats)[i];
412         for (i = 0; i < NUM_FLOW_STATS; i++,
413                         bitmap_sim_iterator_inc(&it))
414                 if (priv->mdev->dev->caps.flags2 &
415                     MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
416                         if (bitmap_sim_iterator_test(&it))
417                                 data[index++] =
418                                         ((u64 *)&priv->flowstats)[i];
419         for (i = 0; i < NUM_VF_STATS; i++,
420                         bitmap_sim_iterator_inc(&it))
421                 if (bitmap_sim_iterator_test(&it))
422                         data[index++] =
423                                 ((unsigned long *)&priv->vf_stats)[i];
424         for (i = 0; i < NUM_VPORT_STATS; i++,
425                         bitmap_sim_iterator_inc(&it))
426                 if (bitmap_sim_iterator_test(&it))
427                         data[index++] =
428                                 ((unsigned long *)&priv->vport_stats)[i];
429         for (i = 0; i < NUM_PORT_STATS; i++,
430                         bitmap_sim_iterator_inc(&it))
431                 if (bitmap_sim_iterator_test(&it))
432                         data[index++] =
433                                 ((unsigned long *)&priv->port_stats)[i];
434
435         for (i = 0; i < priv->tx_ring_num; i++) {
436                 data[index++] = priv->tx_ring[i]->packets;
437                 data[index++] = priv->tx_ring[i]->bytes;
438         }
439         for (i = 0; i < priv->rx_ring_num; i++) {
440                 data[index++] = priv->rx_ring[i]->packets;
441                 data[index++] = priv->rx_ring[i]->bytes;
442 #ifdef LL_EXTENDED_STATS
443                 data[index++] = priv->rx_ring[i]->yields;
444                 data[index++] = priv->rx_ring[i]->misses;
445                 data[index++] = priv->rx_ring[i]->cleaned;
446 #endif
447         }
448         spin_unlock_bh(&priv->stats_lock);
449
450 }
451
452 void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv, u64 *data)
453 {
454         int index = 0;
455         int i;
456         struct bitmap_sim_iterator it;
457
458         int num_of_stats = NUM_ALL_STATS -
459                 ((priv->mdev->dev->caps.flags2 &
460                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
461
462         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
463
464         if (!data || !priv->port_up)
465                 return;
466
467         spin_lock_bh(&priv->stats_lock);
468
469         for (i = 0; i < NUM_PKT_STATS; i++,
470               bitmap_sim_iterator_inc(&it))
471                         if (bitmap_sim_iterator_test(&it))
472                                 ((unsigned long *)&priv->pkstats)[i] =
473                                         data[index++];
474         for (i = 0; i < NUM_FLOW_STATS; i++,
475               bitmap_sim_iterator_inc(&it))
476                 if (priv->mdev->dev->caps.flags2 &
477                     MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
478                         if (bitmap_sim_iterator_test(&it))
479                                 ((u64 *)&priv->flowstats)[i] =
480                                          data[index++];
481         for (i = 0; i < NUM_VF_STATS; i++,
482               bitmap_sim_iterator_inc(&it))
483                         if (bitmap_sim_iterator_test(&it))
484                                 ((unsigned long *)&priv->vf_stats)[i] =
485                                         data[index++];
486         for (i = 0; i < NUM_VPORT_STATS; i++,
487               bitmap_sim_iterator_inc(&it))
488                         if (bitmap_sim_iterator_test(&it))
489                                 ((unsigned long *)&priv->vport_stats)[i] =
490                                         data[index++];
491         for (i = 0; i < NUM_PORT_STATS; i++,
492               bitmap_sim_iterator_inc(&it))
493                         if (bitmap_sim_iterator_test(&it))
494                                 ((unsigned long *)&priv->port_stats)[i] =
495                                         data[index++];
496
497         for (i = 0; i < priv->tx_ring_num; i++) {
498                 priv->tx_ring[i]->packets = data[index++];
499                 priv->tx_ring[i]->bytes = data[index++];
500         }
501         for (i = 0; i < priv->rx_ring_num; i++) {
502                 priv->rx_ring[i]->packets = data[index++];
503                 priv->rx_ring[i]->bytes = data[index++];
504         }
505         spin_unlock_bh(&priv->stats_lock);
506 }
507
508 static void mlx4_en_self_test(struct net_device *dev,
509                               struct ethtool_test *etest, u64 *buf)
510 {
511         mlx4_en_ex_selftest(dev, &etest->flags, buf);
512 }
513
514 static void mlx4_en_get_strings(struct net_device *dev,
515                                 uint32_t stringset, uint8_t *data)
516 {
517         struct mlx4_en_priv *priv = netdev_priv(dev);
518         int index = 0;
519         int i, k;
520         struct bitmap_sim_iterator it;
521
522         int num_of_stats = NUM_ALL_STATS -
523                 ((priv->mdev->dev->caps.flags2 &
524                  MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
525
526         bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
527
528         switch (stringset) {
529         case ETH_SS_TEST:
530                 for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
531                         strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
532                 if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
533                         for (; i < MLX4_EN_NUM_SELF_TEST; i++)
534                                 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
535                 break;
536
537         case ETH_SS_STATS:
538                 /* Add main counters */
539                 for (i = 0; i < NUM_PKT_STATS; i++,
540                      bitmap_sim_iterator_inc(&it))
541                         if (bitmap_sim_iterator_test(&it))
542                                 strcpy(data + (index++) * ETH_GSTRING_LEN,
543                                        main_strings[i]);
544
545                 for (k = 0; k < NUM_FLOW_STATS; k++,
546                      bitmap_sim_iterator_inc(&it))
547                         if (priv->mdev->dev->caps.flags2 &
548                             MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
549                                 if (bitmap_sim_iterator_test(&it))
550                                         strcpy(data + (index++) *
551                                                ETH_GSTRING_LEN,
552                                                 main_strings[i + k]);
553
554                 for (; (i + k) < num_of_stats; i++,
555                      bitmap_sim_iterator_inc(&it))
556                         if (bitmap_sim_iterator_test(&it))
557                                 strcpy(data + (index++) * ETH_GSTRING_LEN,
558                                        main_strings[i + k]);
559
560                 for (i = 0; i < priv->tx_ring_num; i++) {
561                         sprintf(data + (index++) * ETH_GSTRING_LEN,
562                                 "tx%d_packets", i);
563                         sprintf(data + (index++) * ETH_GSTRING_LEN,
564                                 "tx%d_bytes", i);
565                 }
566                 for (i = 0; i < priv->rx_ring_num; i++) {
567                         sprintf(data + (index++) * ETH_GSTRING_LEN,
568                                 "rx%d_packets", i);
569                         sprintf(data + (index++) * ETH_GSTRING_LEN,
570                                 "rx%d_bytes", i);
571 #ifdef LL_EXTENDED_STATS
572                         sprintf(data + (index++) * ETH_GSTRING_LEN,
573                                 "rx%d_napi_yield", i);
574                         sprintf(data + (index++) * ETH_GSTRING_LEN,
575                                 "rx%d_misses", i);
576                         sprintf(data + (index++) * ETH_GSTRING_LEN,
577                                 "rx%d_cleaned", i);
578 #endif
579                 }
580                 break;
581         }
582 }
583
584 static u32 mlx4_en_autoneg_get(struct net_device *dev)
585 {
586         struct mlx4_en_priv *priv = netdev_priv(dev);
587         struct mlx4_en_dev *mdev = priv->mdev;
588         u32 autoneg = AUTONEG_DISABLE;
589
590         if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
591             priv->port_state.autoneg) {
592                 autoneg = AUTONEG_ENABLE;
593         }
594
595         return autoneg;
596 }
597
598 static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
599 {
600         struct mlx4_en_priv *priv = netdev_priv(dev);
601         int trans_type;
602
603         /* SUPPORTED_1000baseT_Half isn't supported */
604         cmd->supported = SUPPORTED_1000baseT_Full
605                         |SUPPORTED_10000baseT_Full;
606
607         cmd->advertising = ADVERTISED_1000baseT_Full
608                           |ADVERTISED_10000baseT_Full;
609
610         cmd->supported |= SUPPORTED_1000baseKX_Full
611                         |SUPPORTED_10000baseKX4_Full
612                         |SUPPORTED_10000baseKR_Full
613                         |SUPPORTED_10000baseR_FEC
614                         |SUPPORTED_40000baseKR4_Full
615                         |SUPPORTED_40000baseCR4_Full
616                         |SUPPORTED_40000baseSR4_Full
617                         |SUPPORTED_40000baseLR4_Full;
618
619         /* ADVERTISED_1000baseT_Half isn't advertised */
620         cmd->advertising |= ADVERTISED_1000baseKX_Full
621                           |ADVERTISED_10000baseKX4_Full
622                           |ADVERTISED_10000baseKR_Full
623                           |ADVERTISED_10000baseR_FEC
624                           |ADVERTISED_40000baseKR4_Full
625                           |ADVERTISED_40000baseCR4_Full
626                           |ADVERTISED_40000baseSR4_Full
627                           |ADVERTISED_40000baseLR4_Full;
628
629         if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
630                 return -ENOMEM;
631
632         cmd->autoneg = mlx4_en_autoneg_get(dev);
633         if (cmd->autoneg == AUTONEG_ENABLE) {
634                 cmd->supported |= SUPPORTED_Autoneg;
635                 cmd->advertising |= ADVERTISED_Autoneg;
636         }
637
638         trans_type = priv->port_state.transciver;
639         if (netif_carrier_ok(dev)) {
640                 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
641                 cmd->duplex = DUPLEX_FULL;
642         } else {
643                 ethtool_cmd_speed_set(cmd, -1);
644                 cmd->duplex = -1;
645         }
646
647         if (trans_type > 0 && trans_type <= 0xC) {
648                 cmd->port = PORT_FIBRE;
649                 cmd->transceiver = XCVR_EXTERNAL;
650                 cmd->supported |= SUPPORTED_FIBRE;
651                 cmd->advertising |= ADVERTISED_FIBRE;
652         } else if (trans_type == 0x80 || trans_type == 0) {
653                 cmd->port = PORT_TP;
654                 cmd->transceiver = XCVR_INTERNAL;
655                 cmd->supported |= SUPPORTED_TP;
656                 cmd->advertising |= ADVERTISED_TP;
657         } else  {
658                 cmd->port = -1;
659                 cmd->transceiver = -1;
660         }
661         return 0;
662 }
663
664 static const char *mlx4_en_duplex_to_string(int duplex)
665 {
666         switch (duplex) {
667         case DUPLEX_FULL:
668                 return "FULL";
669         case DUPLEX_HALF:
670                 return "HALF";
671         default:
672                 break;
673         }
674         return "UNKNOWN";
675 }
676
677 static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
678 {
679         struct mlx4_en_priv *priv = netdev_priv(dev);
680         struct mlx4_en_port_state *port_state = &priv->port_state;
681
682         if ((cmd->autoneg != port_state->autoneg) ||
683             (ethtool_cmd_speed(cmd) != port_state->link_speed) ||
684             (cmd->duplex != DUPLEX_FULL)) {
685                 en_info(priv, "Changing port state properties (auto-negotiation"
686                               ", speed/duplex) is not supported. Current:"
687                               " auto-negotiation=%d speed/duplex=%d/%s\n",
688                               port_state->autoneg, port_state->link_speed,
689                               mlx4_en_duplex_to_string(DUPLEX_FULL));
690                 return -EOPNOTSUPP;
691         }
692
693         /* User provided the same port state properties that are currently
694          * set; nothing to change.
695          */
696         return 0;
697 }
698
699 static int mlx4_en_get_coalesce(struct net_device *dev,
700                               struct ethtool_coalesce *coal)
701 {
702         struct mlx4_en_priv *priv = netdev_priv(dev);
703
704         coal->tx_coalesce_usecs = priv->tx_usecs;
705         coal->tx_max_coalesced_frames = priv->tx_frames;
706         coal->rx_coalesce_usecs = priv->rx_usecs;
707         coal->rx_max_coalesced_frames = priv->rx_frames;
708
709         coal->pkt_rate_low = priv->pkt_rate_low;
710         coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
711         coal->pkt_rate_high = priv->pkt_rate_high;
712         coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
713         coal->rate_sample_interval = priv->sample_interval;
714         coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
715         return 0;
716 }
717
718 static int mlx4_en_set_coalesce(struct net_device *dev,
719                               struct ethtool_coalesce *coal)
720 {
721         struct mlx4_en_priv *priv = netdev_priv(dev);
722         int err, i;
723
724         priv->rx_frames = (coal->rx_max_coalesced_frames ==
725                            MLX4_EN_AUTO_CONF) ?
726                                 MLX4_EN_RX_COAL_TARGET /
727                                 priv->dev->mtu + 1 :
728                                 coal->rx_max_coalesced_frames;
729         priv->rx_usecs = (coal->rx_coalesce_usecs ==
730                           MLX4_EN_AUTO_CONF) ?
731                                 MLX4_EN_RX_COAL_TIME :
732                                 coal->rx_coalesce_usecs;
733
734         /* Setting TX coalescing parameters */
735         if (coal->tx_coalesce_usecs != priv->tx_usecs ||
736             coal->tx_max_coalesced_frames != priv->tx_frames) {
737                 priv->tx_usecs = coal->tx_coalesce_usecs;
738                 priv->tx_frames = coal->tx_max_coalesced_frames;
739                 if (priv->port_up) {
740                         for (i = 0; i < priv->tx_ring_num; i++) {
741                                 priv->tx_cq[i]->moder_cnt = priv->tx_frames;
742                                 priv->tx_cq[i]->moder_time = priv->tx_usecs;
743                                 if (mlx4_en_set_cq_moder(priv, priv->tx_cq[i]))
744                                         en_warn(priv, "Failed changing moderation for TX cq %d\n", i);
745                         }
746                 }
747         }
748
749         /* Set adaptive coalescing params */
750         priv->pkt_rate_low = coal->pkt_rate_low;
751         priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
752         priv->pkt_rate_high = coal->pkt_rate_high;
753         priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
754         priv->sample_interval = coal->rate_sample_interval;
755         priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
756         if (priv->adaptive_rx_coal)
757                 return 0;
758
759         if (priv->port_up) {
760                 for (i = 0; i < priv->rx_ring_num; i++) {
761                         priv->rx_cq[i]->moder_cnt = priv->rx_frames;
762                         priv->rx_cq[i]->moder_time = priv->rx_usecs;
763                         priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
764                         err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
765                         if (err)
766                                 return err;
767                 }
768         }
769
770         return 0;
771 }
772
773 static int mlx4_en_set_pauseparam(struct net_device *dev,
774                                 struct ethtool_pauseparam *pause)
775 {
776         struct mlx4_en_priv *priv = netdev_priv(dev);
777         struct mlx4_en_dev *mdev = priv->mdev;
778         int err;
779
780         if (pause->autoneg)
781                 return -EOPNOTSUPP;
782
783         priv->prof->tx_pause = pause->tx_pause != 0;
784         priv->prof->rx_pause = pause->rx_pause != 0;
785         err = mlx4_SET_PORT_general(mdev->dev, priv->port,
786                                     priv->rx_skb_size + ETH_FCS_LEN,
787                                     priv->prof->tx_pause,
788                                     priv->prof->tx_ppp,
789                                     priv->prof->rx_pause,
790                                     priv->prof->rx_ppp);
791         if (err)
792                 en_err(priv, "Failed setting pause params\n");
793
794         return err;
795 }
796
797 static void mlx4_en_get_pauseparam(struct net_device *dev,
798                                  struct ethtool_pauseparam *pause)
799 {
800         struct mlx4_en_priv *priv = netdev_priv(dev);
801
802         pause->tx_pause = priv->prof->tx_pause;
803         pause->rx_pause = priv->prof->rx_pause;
804         pause->autoneg = mlx4_en_autoneg_get(dev);
805 }
806
807 /* rtnl lock must be taken before calling */
808 int mlx4_en_pre_config(struct mlx4_en_priv *priv)
809 {
810 #ifdef CONFIG_RFS_ACCEL
811         struct cpu_rmap *rmap;
812
813         if (!priv->dev->rx_cpu_rmap)
814                 return 0;
815
816         /* Disable RFS events
817          * Must have all RFS jobs flushed before freeing resources
818          */
819         rmap = priv->dev->rx_cpu_rmap;
820         priv->dev->rx_cpu_rmap = NULL;
821
822         rtnl_unlock();
823         free_irq_cpu_rmap(rmap);
824         rtnl_lock();
825
826         if (priv->dev->rx_cpu_rmap)
827                 return -EBUSY; /* another configuration completed while lock
828                                 * was free
829                                 */
830
831         /* Make sure all currently running filter_work are being processed
832          * Other work will return immediately because of disable_rfs
833          */
834         flush_workqueue(priv->mdev->workqueue);
835
836 #endif
837
838         return 0;
839 }
840
841 static int mlx4_en_set_ringparam(struct net_device *dev,
842                                  struct ethtool_ringparam *param)
843 {
844         struct mlx4_en_priv *priv = netdev_priv(dev);
845         struct mlx4_en_dev *mdev = priv->mdev;
846         u32 rx_size, tx_size;
847         int port_up = 0;
848         int err = 0;
849         int i, n_stats;
850         u64 *data = NULL;
851
852         if (!priv->port_up)
853                 return -ENOMEM;
854
855         if (param->rx_jumbo_pending || param->rx_mini_pending)
856                 return -EINVAL;
857
858         rx_size = roundup_pow_of_two(param->rx_pending);
859         rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
860         rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
861         tx_size = roundup_pow_of_two(param->tx_pending);
862         tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
863         tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
864
865         if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
866                                         priv->rx_ring[0]->size) &&
867             tx_size == priv->tx_ring[0]->size)
868                 return 0;
869         err = mlx4_en_pre_config(priv);
870         if (err)
871                 return err;
872
873         mutex_lock(&mdev->state_lock);
874         if (priv->port_up) {
875                 port_up = 1;
876                 mlx4_en_stop_port(dev);
877         }
878
879         /* Cache port statistics */
880         n_stats = mlx4_en_get_sset_count(dev, ETH_SS_STATS);
881         if (n_stats > 0) {
882                 data = kmalloc(n_stats * sizeof(u64), GFP_KERNEL);
883                 if (data)
884                         mlx4_en_get_ethtool_stats(dev, NULL, data);
885         }
886
887         mlx4_en_free_resources(priv);
888
889         priv->prof->tx_ring_size = tx_size;
890         priv->prof->rx_ring_size = rx_size;
891
892         err = mlx4_en_alloc_resources(priv);
893         if (err) {
894                 en_err(priv, "Failed reallocating port resources\n");
895                 goto out;
896         }
897
898         /* Restore port statistics */
899         if (n_stats > 0 && data)
900                 mlx4_en_restore_ethtool_stats(priv, data);
901
902         if (port_up) {
903                 err = mlx4_en_start_port(dev);
904                 if (err) {
905                         en_err(priv, "Failed starting port\n");
906                         goto out;
907                 }
908
909                 for (i = 0; i < priv->rx_ring_num; i++) {
910                         priv->rx_cq[i]->moder_cnt = priv->rx_frames;
911                         priv->rx_cq[i]->moder_time = priv->rx_usecs;
912                         priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
913                         err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
914                         if (err)
915                                 goto out;
916                 }
917         }
918
919 out:
920         kfree(data);
921         mutex_unlock(&mdev->state_lock);
922         return err;
923 }
924
925 static void mlx4_en_get_ringparam(struct net_device *dev,
926                                   struct ethtool_ringparam *param)
927 {
928         struct mlx4_en_priv *priv = netdev_priv(dev);
929
930         if (!priv->port_up)
931                 return;
932
933         memset(param, 0, sizeof(*param));
934         param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
935         param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
936         param->rx_pending = priv->port_up ?
937                 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
938         param->tx_pending = priv->tx_ring[0]->size;
939 }
940
941 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
942 {
943         struct mlx4_en_priv *priv = netdev_priv(dev);
944
945         return priv->rx_ring_num;
946 }
947
948 static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
949 {
950         struct mlx4_en_priv *priv = netdev_priv(dev);
951         struct mlx4_en_rss_map *rss_map = &priv->rss_map;
952         int rss_rings;
953         size_t n = priv->rx_ring_num;
954         int err = 0;
955
956         rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
957         rss_rings = 1 << ilog2(rss_rings);
958
959         while (n--) {
960                 ring_index[n] = rss_map->qps[n % rss_rings].qpn -
961                         rss_map->base_qpn;
962         }
963
964         return err;
965 }
966
967 static int mlx4_en_set_rxfh_indir(struct net_device *dev,
968                 const u32 *ring_index)
969 {
970         struct mlx4_en_priv *priv = netdev_priv(dev);
971         struct mlx4_en_dev *mdev = priv->mdev;
972         int port_up = 0;
973         int err = 0;
974         int i;
975         int rss_rings = 0;
976
977         /* Calculate RSS table size and make sure flows are spread evenly
978          * between rings
979          */
980         for (i = 0; i < priv->rx_ring_num; i++) {
981                 if (i > 0 && !ring_index[i] && !rss_rings)
982                         rss_rings = i;
983
984                 if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
985                         return -EINVAL;
986         }
987
988         if (!rss_rings)
989                 rss_rings = priv->rx_ring_num;
990
991         /* RSS table size must be a power of 2 */
992         if (!is_power_of_2(rss_rings))
993                 return -EINVAL;
994
995         mutex_lock(&mdev->state_lock);
996         if (priv->port_up) {
997                 port_up = 1;
998                 mlx4_en_stop_port(dev);
999         }
1000
1001         priv->prof->rss_rings = rss_rings;
1002
1003         if (port_up) {
1004                 err = mlx4_en_start_port(dev);
1005                 if (err)
1006                         en_err(priv, "Failed starting port\n");
1007         }
1008
1009         mutex_unlock(&mdev->state_lock);
1010         return err;
1011 }
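/*
 * Worked example (editorial note): with rx_ring_num = 8, the indirection
 * table { 0, 1, 2, 3, 0, 1, 2, 3 } is accepted and yields rss_rings = 4
 * (a power of two, flows spread evenly), while { 0, 1, 2, 0, 1, 2, 0, 1 }
 * is rejected because three rings is not a power of two, and
 * { 0, 0, 1, 1, ... } is rejected because entry i must equal i % rss_rings.
 */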
1012
1013 #define all_zeros_or_all_ones(field)            \
1014         ((field) == 0 || (field) == (__force typeof(field))-1)
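/*
 * Example (editorial note): for a __be16 mask, 0x0000 and 0xffff satisfy the
 * macro above while a partial mask such as 0xff00 does not; the validation
 * below relies on this to accept only wildcard or exact-match fields.
 */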
1015
1016 static int mlx4_en_validate_flow(struct net_device *dev,
1017                                  struct mlx4_ethtool_rxnfc *cmd)
1018 {
1019         struct ethtool_usrip4_spec *l3_mask;
1020         struct ethtool_tcpip4_spec *l4_mask;
1021         struct ethhdr *eth_mask;
1022
1023         if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1024                 return -EINVAL;
1025
1026         if (cmd->fs.flow_type & FLOW_MAC_EXT) {
1027                 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
1028                 if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
1029                         return -EINVAL;
1030         }
1031
1032         switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1033         case TCP_V4_FLOW:
1034         case UDP_V4_FLOW:
1035                 if (cmd->fs.m_u.tcp_ip4_spec.tos)
1036                         return -EINVAL;
1037                 l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
1038                 /* don't allow a mask that isn't all zeros or all ones */
1039                 if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
1040                     !all_zeros_or_all_ones(l4_mask->ip4dst) ||
1041                     !all_zeros_or_all_ones(l4_mask->psrc) ||
1042                     !all_zeros_or_all_ones(l4_mask->pdst))
1043                         return -EINVAL;
1044                 break;
1045         case IP_USER_FLOW:
1046                 l3_mask = &cmd->fs.m_u.usr_ip4_spec;
1047                 if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
1048                     cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
1049                     (!l3_mask->ip4src && !l3_mask->ip4dst) ||
1050                     !all_zeros_or_all_ones(l3_mask->ip4src) ||
1051                     !all_zeros_or_all_ones(l3_mask->ip4dst))
1052                         return -EINVAL;
1053                 break;
1054         case ETHER_FLOW:
1055                 eth_mask = &cmd->fs.m_u.ether_spec;
1056                 /* source mac mask must not be set */
1057                 if (!is_zero_ether_addr(eth_mask->h_source))
1058                         return -EINVAL;
1059
1060                 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
1061                 if (!is_broadcast_ether_addr(eth_mask->h_dest))
1062                         return -EINVAL;
1063
1064                 if (!all_zeros_or_all_ones(eth_mask->h_proto))
1065                         return -EINVAL;
1066                 break;
1067         default:
1068                 return -EINVAL;
1069         }
1070
1071         if ((cmd->fs.flow_type & FLOW_EXT)) {
1072                 if (cmd->fs.m_ext.vlan_etype ||
1073                     !(cmd->fs.m_ext.vlan_tci == 0 ||
1074                       cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
1075                         return -EINVAL;
1076                 if (cmd->fs.m_ext.vlan_tci) {
1077                         if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) <
1078                             VLAN_MIN_VALUE ||
1079                             be16_to_cpu(cmd->fs.h_ext.vlan_tci) >
1080                             VLAN_MAX_VALUE)
1081                                 return -EINVAL;
1082                 }
1083         }
1084
1085         return 0;
1086 }
1087
1088 static int mlx4_en_ethtool_add_mac_rule(struct mlx4_ethtool_rxnfc *cmd,
1089                                         struct list_head *rule_list_h,
1090                                         struct mlx4_spec_list *spec_l2,
1091                                         unsigned char *mac)
1092 {
1093         int err = 0;
1094         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
1095
1096         spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
1097         memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
1098         memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
1099
1100         if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
1101                 spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
1102                 spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
1103         }
1104
1105         list_add_tail(&spec_l2->list, rule_list_h);
1106
1107         return err;
1108 }
1109
1110 static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
1111                                                 struct mlx4_ethtool_rxnfc *cmd,
1112                                                 struct list_head *rule_list_h,
1113                                                 struct mlx4_spec_list *spec_l2,
1114                                                 __be32 ipv4_dst)
1115 {
1116         unsigned char mac[ETH_ALEN];
1117
1118         if (!ipv4_is_multicast(ipv4_dst)) {
1119                 if (cmd->fs.flow_type & FLOW_MAC_EXT)
1120                         memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
1121                 else
1122                         memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
1123         } else {
1124                 ip_eth_mc_map(ipv4_dst, mac);
1125         }
1126
1127         return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
1128 }
1129
1130 static int add_ip_rule(struct mlx4_en_priv *priv,
1131                                 struct mlx4_ethtool_rxnfc *cmd,
1132                                 struct list_head *list_h)
1133 {
1134         struct mlx4_spec_list *spec_l2 = NULL;
1135         struct mlx4_spec_list *spec_l3 = NULL;
1136         struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
1137
1138         spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
1139         spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
1140         if (!spec_l2 || !spec_l3) {
1141                 en_err(priv, "Failed to allocate ethtool rule.\n");
1142                 kfree(spec_l2);
1143                 kfree(spec_l3);
1144                 return -ENOMEM;
1145         }
1146
1147         mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
1148                                              cmd->fs.h_u.
1149                                              usr_ip4_spec.ip4dst);
1150         spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1151         spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
1152         if (l3_mask->ip4src)
1153                 spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1154         spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
1155         if (l3_mask->ip4dst)
1156                 spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1157         list_add_tail(&spec_l3->list, list_h);
1158
1159         return 0;
1160 }
1161
1162 static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
1163                              struct mlx4_ethtool_rxnfc *cmd,
1164                              struct list_head *list_h, int proto)
1165 {
1166         struct mlx4_spec_list *spec_l2 = NULL;
1167         struct mlx4_spec_list *spec_l3 = NULL;
1168         struct mlx4_spec_list *spec_l4 = NULL;
1169         struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
1170
1171         spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
1172         spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
1173         spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
1174         if (!spec_l2 || !spec_l3 || !spec_l4) {
1175                 en_err(priv, "Failed to allocate ethtool rule.\n");
1176                 kfree(spec_l2);
1177                 kfree(spec_l3);
1178                 kfree(spec_l4);
1179                 return -ENOMEM;
1180         }
1181
1182         spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1183
1184         if (proto == TCP_V4_FLOW) {
1185                 mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
1186                                                      spec_l2,
1187                                                      cmd->fs.h_u.
1188                                                      tcp_ip4_spec.ip4dst);
1189                 spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
1190                 spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
1191                 spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
1192                 spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
1193                 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
1194         } else {
1195                 mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
1196                                                      spec_l2,
1197                                                      cmd->fs.h_u.
1198                                                      udp_ip4_spec.ip4dst);
1199                 spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
1200                 spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
1201                 spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
1202                 spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
1203                 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
1204         }
1205
1206         if (l4_mask->ip4src)
1207                 spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1208         if (l4_mask->ip4dst)
1209                 spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1210
1211         if (l4_mask->psrc)
1212                 spec_l4->tcp_udp.src_port_msk = MLX4_BE_SHORT_MASK;
1213         if (l4_mask->pdst)
1214                 spec_l4->tcp_udp.dst_port_msk = MLX4_BE_SHORT_MASK;
1215
1216         list_add_tail(&spec_l3->list, list_h);
1217         list_add_tail(&spec_l4->list, list_h);
1218
1219         return 0;
1220 }
1221
1222 static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
1223                                              struct mlx4_ethtool_rxnfc *cmd,
1224                                              struct list_head *rule_list_h)
1225 {
1226         int err;
1227         struct ethhdr *eth_spec;
1228         struct mlx4_spec_list *spec_l2;
1229         struct mlx4_en_priv *priv = netdev_priv(dev);
1230
1231         err = mlx4_en_validate_flow(dev, cmd);
1232         if (err)
1233                 return err;
1234
1235         switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1236         case ETHER_FLOW:
1237                 spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
1238                 if (!spec_l2)
1239                         return -ENOMEM;
1240
1241                 eth_spec = &cmd->fs.h_u.ether_spec;
1242                 mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &eth_spec->h_dest[0]);
1243                 spec_l2->eth.ether_type = eth_spec->h_proto;
1244                 if (eth_spec->h_proto)
1245                         spec_l2->eth.ether_type_enable = 1;
1246                 break;
1247         case IP_USER_FLOW:
1248                 err = add_ip_rule(priv, cmd, rule_list_h);
1249                 break;
1250         case TCP_V4_FLOW:
1251                 err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
1252                 break;
1253         case UDP_V4_FLOW:
1254                 err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
1255                 break;
1256         }
1257
1258         return err;
1259 }
1260
1261 static int mlx4_en_flow_replace(struct net_device *dev,
1262                                 struct mlx4_ethtool_rxnfc *cmd)
1263 {
1264         int err;
1265         struct mlx4_en_priv *priv = netdev_priv(dev);
1266         struct mlx4_en_dev *mdev = priv->mdev;
1267         struct ethtool_flow_id *loc_rule;
1268         struct mlx4_spec_list *spec, *tmp_spec;
1269         u32 qpn;
1270         u64 reg_id;
1271
1272         struct mlx4_net_trans_rule rule = {
1273                 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1274                 .exclusive = 0,
1275                 .allow_loopback = 1,
1276                 .promisc_mode = MLX4_FS_REGULAR,
1277         };
1278
1279         rule.port = priv->port;
1280         rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
1281         INIT_LIST_HEAD(&rule.list);
1282
1283         /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
1284         if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
1285                 qpn = priv->drop_qp.qpn;
1286         else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
1287                 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
1288         } else {
1289                 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
1290                         en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
1291                                 cmd->fs.ring_cookie);
1292                         return -EINVAL;
1293                 }
1294                 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
1295                 if (!qpn) {
1296                         en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
1297                                 cmd->fs.ring_cookie);
1298                         return -EINVAL;
1299                 }
1300         }
1301         rule.qpn = qpn;
1302         err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
1303         if (err)
1304                 goto out_free_list;
1305
1306         mutex_lock(&mdev->state_lock);
1307         loc_rule = &priv->ethtool_rules[cmd->fs.location];
1308         if (loc_rule->id) {
1309                 err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
1310                 if (err) {
1311                         en_err(priv, "Failed to detach network rule at location %d. registration id = %llx\n",
1312                                cmd->fs.location, loc_rule->id);
1313                         goto unlock;
1314                 }
1315                 loc_rule->id = 0;
1316                 memset(&loc_rule->flow_spec, 0,
1317                        sizeof(struct ethtool_rx_flow_spec));
1318                 list_del(&loc_rule->list);
1319         }
1320         err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
1321         if (err) {
1322                 en_err(priv, "Failed to attach network rule at location %d.\n",
1323                        cmd->fs.location);
1324                 goto unlock;
1325         }
1326         loc_rule->id = reg_id;
1327         memcpy(&loc_rule->flow_spec, &cmd->fs,
1328                sizeof(struct ethtool_rx_flow_spec));
1329         list_add_tail(&loc_rule->list, &priv->ethtool_list);
1330
1331 unlock:
1332         mutex_unlock(&mdev->state_lock);
1333 out_free_list:
1334         list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
1335                 list_del(&spec->list);
1336                 kfree(spec);
1337         }
1338         return err;
1339 }
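/*
 * ring_cookie encoding handled above (informal summary, editorial note):
 * RX_CLS_FLOW_DISC drops matching packets via the drop QP, a value below
 * rx_ring_num selects that RX ring's RSS QP, and setting bit 63
 * (EN_ETHTOOL_QP_ATTACH) attaches the rule directly to the QP number held
 * in the lower bits, e.g.:
 *
 *      cmd.fs.ring_cookie = EN_ETHTOOL_QP_ATTACH | qpn;
 */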
1340
1341 static int mlx4_en_flow_detach(struct net_device *dev,
1342                                struct mlx4_ethtool_rxnfc *cmd)
1343 {
1344         int err = 0;
1345         struct ethtool_flow_id *rule;
1346         struct mlx4_en_priv *priv = netdev_priv(dev);
1347         struct mlx4_en_dev *mdev = priv->mdev;
1348
1349         if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1350                 return -EINVAL;
1351
1352         mutex_lock(&mdev->state_lock);
1353         rule = &priv->ethtool_rules[cmd->fs.location];
1354         if (!rule->id) {
1355                 err = -ENOENT;
1356                 goto out;
1357         }
1358
1359         err = mlx4_flow_detach(priv->mdev->dev, rule->id);
1360         if (err) {
1361                 en_err(priv, "Failed to detach network rule at location %d. registration id = 0x%llx\n",
1362                        cmd->fs.location, rule->id);
1363                 goto out;
1364         }
1365         rule->id = 0;
1366         memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
1367
1368         list_del(&rule->list);
1369 out:
1370         mutex_unlock(&mdev->state_lock);
1371         return err;
1372
1373 }
1374
static int mlx4_en_get_flow(struct net_device *dev, struct mlx4_ethtool_rxnfc *cmd,
                            int loc)
{
        int err = 0;
        struct ethtool_flow_id *rule;
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
                return -EINVAL;

        rule = &priv->ethtool_rules[loc];
        if (rule->id)
                memcpy(&cmd->fs, &rule->flow_spec,
                       sizeof(struct ethtool_rx_flow_spec));
        else
                err = -ENOENT;

        return err;
}

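/*
 * mlx4_en_get_num_flows() - count the slots in priv->ethtool_rules[] that
 * currently hold a registered rule (non-zero id).
 */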
static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
{
        int i, res = 0;

        for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
                if (priv->ethtool_rules[i].id)
                        res++;
        }
        return res;
}

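/*
 * mlx4_en_get_rxnfc() - ethtool .get_rxnfc entry point.  Reports the RX
 * ring count (ETHTOOL_GRXRINGS), the number of installed classification
 * rules (ETHTOOL_GRXCLSRLCNT), a single cached rule (ETHTOOL_GRXCLSRULE) or
 * the locations of all rules (ETHTOOL_GRXCLSRLALL).  The classification
 * queries are only honoured when the device uses device-managed flow
 * steering and the port is up.  Note that the generic struct ethtool_rxnfc
 * is reinterpreted through the local struct mlx4_ethtool_rxnfc layout.
 */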
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *c,
                             u32 *rule_locs)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;
        int i = 0, priority = 0;
        struct mlx4_ethtool_rxnfc *cmd = (struct mlx4_ethtool_rxnfc *)c;

        if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
             cmd->cmd == ETHTOOL_GRXCLSRULE ||
             cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
            (mdev->dev->caps.steering_mode !=
             MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
                return -EINVAL;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = priv->rx_ring_num;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = mlx4_en_get_num_flows(priv);
                break;
        case ETHTOOL_GRXCLSRULE:
                err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
                break;
        case ETHTOOL_GRXCLSRLALL:
                while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
                        err = mlx4_en_get_flow(dev, cmd, i);
                        if (!err)
                                rule_locs[priority++] = i;
                        i++;
                }
                err = 0;
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

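/*
 * mlx4_en_set_rxnfc() - ethtool .set_rxnfc entry point.  Dispatches
 * ETHTOOL_SRXCLSRLINS to mlx4_en_flow_replace() and ETHTOOL_SRXCLSRLDEL to
 * mlx4_en_flow_detach().  Only valid in device-managed flow steering mode
 * while the port is up.
 */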
static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *c)
{
        int err = 0;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_ethtool_rxnfc *cmd = (struct mlx4_ethtool_rxnfc *)c;

        if (mdev->dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
                return -EINVAL;

        switch (cmd->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                err = mlx4_en_flow_replace(dev, cmd);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                err = mlx4_en_flow_detach(dev, cmd);
                break;
        default:
                en_warn(priv, "Unsupported ethtool command (%d)\n", cmd->cmd);
                return -EINVAL;
        }

        return err;
}

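/*
 * mlx4_en_get_channels() - report the supported ring limits (MAX_RX_RINGS,
 * MLX4_EN_MAX_TX_RING_P_UP) and the current RX/TX ring counts.  TX is
 * reported per user priority, i.e. tx_ring_num / MLX4_EN_NUM_UP.
 */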
static void mlx4_en_get_channels(struct net_device *dev,
                                 struct ethtool_channels *channel)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        memset(channel, 0, sizeof(*channel));

        channel->max_rx = MAX_RX_RINGS;
        channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;

        channel->rx_count = priv->rx_ring_num;
        channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
}

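/*
 * mlx4_en_set_channels() - apply a new RX/TX channel configuration.  After
 * validating the requested counts the port is stopped (if running), the
 * ring resources are freed and reallocated with the new sizes, the stack's
 * real queue counts and TC mapping are updated, and the port is restarted
 * with the previous interrupt-moderation settings restored on each RX CQ.
 */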
static int mlx4_en_set_channels(struct net_device *dev,
                                struct ethtool_channels *channel)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int port_up = 0;
        int i;
        int err = 0;

        if (channel->other_count || channel->combined_count ||
            channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
            channel->rx_count > MAX_RX_RINGS ||
            !channel->tx_count || !channel->rx_count)
                return -EINVAL;

        err = mlx4_en_pre_config(priv);
        if (err)
                return err;

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev);
        }

        mlx4_en_free_resources(priv);

        priv->num_tx_rings_p_up = channel->tx_count;
        priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
        priv->rx_ring_num = channel->rx_count;

        err = mlx4_en_alloc_resources(priv);
        if (err) {
                en_err(priv, "Failed reallocating port resources\n");
                goto out;
        }

        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

        mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

        en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
                        en_err(priv, "Failed starting port\n");

                for (i = 0; i < priv->rx_ring_num; i++) {
                        priv->rx_cq[i]->moder_cnt = priv->rx_frames;
                        priv->rx_cq[i]->moder_time = priv->rx_usecs;
                        priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
                        err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
                        if (err)
                                goto out;
                }
        }

out:
        mutex_unlock(&mdev->state_lock);
        return err;
}

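/*
 * mlx4_en_get_ts_info() - report timestamping capabilities.  On top of the
 * defaults filled in by ethtool_op_get_ts_info(), devices advertising
 * MLX4_DEV_CAP_FLAG2_TS add hardware TX/RX/raw timestamping together with
 * the supported tx_types and rx_filters.
 */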
static int mlx4_en_get_ts_info(struct net_device *dev,
                               struct ethtool_ts_info *info)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int ret;

        ret = ethtool_op_get_ts_info(dev, info);
        if (ret)
                return ret;

        if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
                info->so_timestamping |=
                        SOF_TIMESTAMPING_TX_HARDWARE |
                        SOF_TIMESTAMPING_RX_HARDWARE |
                        SOF_TIMESTAMPING_RAW_HARDWARE;

                info->tx_types =
                        (1 << HWTSTAMP_TX_OFF) |
                        (1 << HWTSTAMP_TX_ON);

                info->rx_filters =
                        (1 << HWTSTAMP_FILTER_NONE) |
                        (1 << HWTSTAMP_FILTER_ALL);
        }

        return ret;
}

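/*
 * The ethtool operations table for the mlx4_en netdevice.  It is presumably
 * attached to the net_device during interface setup in en_netdev.c; the
 * handlers above service the flow-steering, channel and timestamping
 * requests, while the remaining callbacks are defined earlier in this file.
 */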
const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_drvinfo = mlx4_en_get_drvinfo,
        .get_settings = mlx4_en_get_settings,
        .set_settings = mlx4_en_set_settings,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx4_en_get_strings,
        .get_sset_count = mlx4_en_get_sset_count,
        .get_ethtool_stats = mlx4_en_get_ethtool_stats,
        .self_test = mlx4_en_self_test,
        .get_wol = mlx4_en_get_wol,
        .set_wol = mlx4_en_set_wol,
        .get_msglevel = mlx4_en_get_msglevel,
        .set_msglevel = mlx4_en_set_msglevel,
        .get_coalesce = mlx4_en_get_coalesce,
        .set_coalesce = mlx4_en_set_coalesce,
        .get_pauseparam = mlx4_en_get_pauseparam,
        .set_pauseparam = mlx4_en_set_pauseparam,
        .get_ringparam = mlx4_en_get_ringparam,
        .set_ringparam = mlx4_en_set_ringparam,
        .get_rxnfc = mlx4_en_get_rxnfc,
        .set_rxnfc = mlx4_en_set_rxnfc,
        .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
        .get_rxfh_indir = mlx4_en_get_rxfh_indir,
        .set_rxfh_indir = mlx4_en_set_rxfh_indir,
        .get_channels = mlx4_en_get_channels,
        .set_channels = mlx4_en_set_channels,
        .get_ts_info = mlx4_en_get_ts_info,
};
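
/*
 * Editor's illustration (not part of the original source): on a Linux-style
 * stack these handlers would typically be exercised from userland with
 * ethtool(8), for example:
 *
 *   ethtool -L eth0 rx 8 tx 8                      -> mlx4_en_set_channels()
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
 *                                                  -> mlx4_en_set_rxnfc()
 *   ethtool -n eth0                                -> mlx4_en_get_rxnfc()
 *   ethtool -T eth0                                -> mlx4_en_get_ts_info()
 *
 * The interface name and numeric values are placeholders; the FreeBSD OFED
 * port may reach these entry points through its own compatibility paths
 * rather than a native ethtool(8) binary.
 */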