2 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/etherdevice.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #ifdef CONFIG_NET_RX_BUSY_POLL
38 #include <net/busy_poll.h>
41 #include <linux/list.h>
42 #include <linux/if_ether.h>
44 #include <linux/mlx4/driver.h>
45 #include <linux/mlx4/device.h>
46 #include <linux/mlx4/cmd.h>
47 #include <linux/mlx4/cq.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
55 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
56 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
57 static int mlx4_en_unit;
59 #ifdef CONFIG_NET_RX_BUSY_POLL
60 /* must be called with local_bh_disable()d */
61 static int mlx4_en_low_latency_recv(struct napi_struct *napi)
63 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
64 struct net_device *dev = cq->dev;
65 struct mlx4_en_priv *priv = netdev_priv(dev);
66 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
70 return LL_FLUSH_FAILED;
72 if (!mlx4_en_cq_lock_poll(cq))
75 done = mlx4_en_process_rx_cq(dev, cq, 4);
76 #ifdef LL_EXTENDED_STATS
78 rx_ring->cleaned += done;
83 mlx4_en_cq_unlock_poll(cq);
87 #endif /* CONFIG_NET_RX_BUSY_POLL */
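/*
 * Summary of the low-latency receive path above: the socket's busy-poll loop
 * calls it directly, the CQ is claimed with mlx4_en_cq_lock_poll(), and at
 * most 4 RX completions (the budget passed to mlx4_en_process_rx_cq()) are
 * processed in the caller's context instead of waiting for the NAPI softirq.
 * If the CQ cannot be claimed, the function bails out early via the returns
 * above.
 */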
89 #ifdef CONFIG_RFS_ACCEL
91 struct mlx4_en_filter {
92 struct list_head next;
93 struct work_struct work;
102 struct mlx4_en_priv *priv;
103 u32 flow_id; /* RFS infrastructure id */
104 int id; /* mlx4_en driver id */
105 u64 reg_id; /* Flow steering API id */
106 u8 activated; /* Used to prevent expiry before filter
109 struct hlist_node filter_chain;
112 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
114 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
118 return MLX4_NET_TRANS_RULE_ID_UDP;
120 return MLX4_NET_TRANS_RULE_ID_TCP;
122 return -EPROTONOSUPPORT;
126 static void mlx4_en_filter_work(struct work_struct *work)
128 struct mlx4_en_filter *filter = container_of(work,
129 struct mlx4_en_filter,
131 struct mlx4_en_priv *priv = filter->priv;
132 struct mlx4_spec_list spec_tcp_udp = {
133 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
136 .dst_port = filter->dst_port,
137 .dst_port_msk = (__force __be16)-1,
138 .src_port = filter->src_port,
139 .src_port_msk = (__force __be16)-1,
143 struct mlx4_spec_list spec_ip = {
144 .id = MLX4_NET_TRANS_RULE_ID_IPV4,
147 .dst_ip = filter->dst_ip,
148 .dst_ip_msk = (__force __be32)-1,
149 .src_ip = filter->src_ip,
150 .src_ip_msk = (__force __be32)-1,
154 struct mlx4_spec_list spec_eth = {
155 .id = MLX4_NET_TRANS_RULE_ID_ETH,
157 struct mlx4_net_trans_rule rule = {
158 .list = LIST_HEAD_INIT(rule.list),
159 .queue_mode = MLX4_NET_TRANS_Q_LIFO,
162 .promisc_mode = MLX4_FS_REGULAR,
164 .priority = MLX4_DOMAIN_RFS,
167 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
169 if (spec_tcp_udp.id < 0) {
170 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
174 list_add_tail(&spec_eth.list, &rule.list);
175 list_add_tail(&spec_ip.list, &rule.list);
176 list_add_tail(&spec_tcp_udp.list, &rule.list);
178 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
179 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
180 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
182 filter->activated = 0;
184 if (filter->reg_id) {
185 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
186 if (rc && rc != -ENOENT)
187 en_err(priv, "Error detaching flow. rc = %d\n", rc);
190 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
192 en_err(priv, "Error attaching flow. err = %d\n", rc);
195 mlx4_en_filter_rfs_expire(priv);
197 filter->activated = 1;
200 static inline struct hlist_head *
201 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
202 __be16 src_port, __be16 dst_port)
207 l = (__force unsigned long)src_port |
208 ((__force unsigned long)dst_port << 2);
209 l ^= (__force unsigned long)(src_ip ^ dst_ip);
211 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
213 return &priv->filter_hash[bucket_idx];
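/*
 * Illustration of the bucket selection above (values are illustrative only):
 * the flow's 5-tuple is folded into a single long as
 *
 *     l  = src_port | (dst_port << 2);
 *     l ^= src_ip ^ dst_ip;
 *
 * and hash_long(l, MLX4_EN_FILTER_HASH_SHIFT) then reduces it to one of
 * 1 << MLX4_EN_FILTER_HASH_SHIFT buckets, so flows that differ only in their
 * ports or addresses still tend to land in different buckets.
 */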
216 static struct mlx4_en_filter *
217 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
218 __be32 dst_ip, u8 ip_proto, __be16 src_port,
219 __be16 dst_port, u32 flow_id)
221 struct mlx4_en_filter *filter = NULL;
223 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
228 filter->rxq_index = rxq_index;
229 INIT_WORK(&filter->work, mlx4_en_filter_work);
231 filter->src_ip = src_ip;
232 filter->dst_ip = dst_ip;
233 filter->ip_proto = ip_proto;
234 filter->src_port = src_port;
235 filter->dst_port = dst_port;
237 filter->flow_id = flow_id;
239 filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
241 list_add_tail(&filter->next, &priv->filters);
242 hlist_add_head(&filter->filter_chain,
243 filter_hash_bucket(priv, src_ip, dst_ip, src_port,
249 static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
251 struct mlx4_en_priv *priv = filter->priv;
254 list_del(&filter->next);
256 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
257 if (rc && rc != -ENOENT)
258 en_err(priv, "Error detaching flow. rc = %d\n", rc);
263 static inline struct mlx4_en_filter *
264 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
265 u8 ip_proto, __be16 src_port, __be16 dst_port)
267 struct hlist_node *elem;
268 struct mlx4_en_filter *filter;
269 struct mlx4_en_filter *ret = NULL;
271 hlist_for_each_entry(filter, elem,
272 filter_hash_bucket(priv, src_ip, dst_ip,
275 if (filter->src_ip == src_ip &&
276 filter->dst_ip == dst_ip &&
277 filter->ip_proto == ip_proto &&
278 filter->src_port == src_port &&
279 filter->dst_port == dst_port) {
289 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
290 u16 rxq_index, u32 flow_id)
292 struct mlx4_en_priv *priv = netdev_priv(net_dev);
293 struct mlx4_en_filter *filter;
294 const struct iphdr *ip;
301 int nhoff = skb_network_offset(skb);
304 if (skb->protocol != htons(ETH_P_IP))
305 return -EPROTONOSUPPORT;
307 ip = (const struct iphdr *)(skb->data + nhoff);
308 if (ip_is_fragment(ip))
309 return -EPROTONOSUPPORT;
311 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
312 return -EPROTONOSUPPORT;
313 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
315 ip_proto = ip->protocol;
321 spin_lock_bh(&priv->filters_lock);
322 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
325 if (filter->rxq_index == rxq_index)
328 filter->rxq_index = rxq_index;
330 filter = mlx4_en_filter_alloc(priv, rxq_index,
331 src_ip, dst_ip, ip_proto,
332 src_port, dst_port, flow_id);
339 queue_work(priv->mdev->workqueue, &filter->work);
344 spin_unlock_bh(&priv->filters_lock);
349 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
350 struct mlx4_en_rx_ring *rx_ring)
352 struct mlx4_en_filter *filter, *tmp;
355 spin_lock_bh(&priv->filters_lock);
356 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
357 list_move(&filter->next, &del_list);
358 hlist_del(&filter->filter_chain);
360 spin_unlock_bh(&priv->filters_lock);
362 list_for_each_entry_safe(filter, tmp, &del_list, next) {
363 cancel_work_sync(&filter->work);
364 mlx4_en_filter_free(filter);
368 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
370 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
374 spin_lock_bh(&priv->filters_lock);
375 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
376 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
379 if (filter->activated &&
380 !work_pending(&filter->work) &&
381 rps_may_expire_flow(priv->dev,
382 filter->rxq_index, filter->flow_id,
384 list_move(&filter->next, &del_list);
385 hlist_del(&filter->filter_chain);
387 last_filter = filter;
392 if (last_filter && (&last_filter->next != priv->filters.next))
393 list_move(&priv->filters, &last_filter->next);
395 spin_unlock_bh(&priv->filters_lock);
397 list_for_each_entry_safe(filter, tmp, &del_list, next)
398 mlx4_en_filter_free(filter);
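/*
 * Note on the expiry pass above: roughly MLX4_EN_FILTER_EXPIRY_QUOTA filters
 * are examined per invocation.  Filters that rps_may_expire_flow() allows to
 * expire are moved to the local del_list and freed outside the spinlock; the
 * list head is then rotated so the next pass resumes just after the last
 * filter examined instead of re-scanning the same entries.
 */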
402 static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
404 struct mlx4_en_priv *priv = netdev_priv(dev);
405 struct mlx4_en_dev *mdev = priv->mdev;
412 en_dbg(HW, priv, "adding VLAN:%d\n", vid);
414 set_bit(vid, priv->active_vlans);
416 /* Add VID to port VLAN filter */
417 mutex_lock(&mdev->state_lock);
418 if (mdev->device_up && priv->port_up) {
419 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
421 en_err(priv, "Failed configuring VLAN filter\n");
423 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
424 en_dbg(HW, priv, "failed adding vlan %d\n", vid);
425 mutex_unlock(&mdev->state_lock);
429 static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
431 struct mlx4_en_priv *priv = netdev_priv(dev);
432 struct mlx4_en_dev *mdev = priv->mdev;
438 en_dbg(HW, priv, "Killing VID:%d\n", vid);
440 clear_bit(vid, priv->active_vlans);
442 /* Remove VID from port VLAN filter */
443 mutex_lock(&mdev->state_lock);
444 mlx4_unregister_vlan(mdev->dev, priv->port, vid);
446 if (mdev->device_up && priv->port_up) {
447 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
449 en_err(priv, "Failed configuring VLAN filter\n");
451 mutex_unlock(&mdev->state_lock);
455 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
456 unsigned char *mac, int *qpn, u64 *reg_id)
458 struct mlx4_en_dev *mdev = priv->mdev;
459 struct mlx4_dev *dev = mdev->dev;
462 switch (dev->caps.steering_mode) {
463 case MLX4_STEERING_MODE_B0: {
468 memcpy(&gid[10], mac, ETH_ALEN);
471 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
474 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
475 struct mlx4_spec_list spec_eth = { {NULL} };
476 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
478 struct mlx4_net_trans_rule rule = {
479 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
482 .promisc_mode = MLX4_FS_REGULAR,
483 .priority = MLX4_DOMAIN_NIC,
486 rule.port = priv->port;
488 INIT_LIST_HEAD(&rule.list);
490 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
491 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
492 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
493 list_add_tail(&spec_eth.list, &rule.list);
495 err = mlx4_flow_attach(dev, &rule, reg_id);
502 en_warn(priv, "Failed Attaching Unicast\n");
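/*
 * The two cases above reflect the device's steering capability: in B0 mode
 * the unicast MAC is embedded in a 16-byte GID and attached to the QP with
 * mlx4_unicast_attach(), while in device-managed flow-steering mode an
 * explicit L2 rule (destination MAC match on this port) is pushed through
 * mlx4_flow_attach() and the returned reg_id is kept for the later detach.
 */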
507 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
508 unsigned char *mac, int qpn, u64 reg_id)
510 struct mlx4_en_dev *mdev = priv->mdev;
511 struct mlx4_dev *dev = mdev->dev;
513 switch (dev->caps.steering_mode) {
514 case MLX4_STEERING_MODE_B0: {
519 memcpy(&gid[10], mac, ETH_ALEN);
522 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
525 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
526 mlx4_flow_detach(dev, reg_id);
530 en_err(priv, "Invalid steering mode.\n");
534 static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
536 struct mlx4_en_dev *mdev = priv->mdev;
537 struct mlx4_dev *dev = mdev->dev;
538 struct mlx4_mac_entry *entry;
542 int *qpn = &priv->base_qpn;
543 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
545 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
546 IF_LLADDR(priv->dev));
547 index = mlx4_register_mac(dev, priv->port, mac);
550 en_err(priv, "Failed adding MAC: %pM\n",
551 IF_LLADDR(priv->dev));
555 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
556 int base_qpn = mlx4_get_base_qpn(dev, priv->port);
557 *qpn = base_qpn + index;
561 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0);
562 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
564 en_err(priv, "Failed to reserve qp for mac registration\n");
568 err = mlx4_en_uc_steer_add(priv, IF_LLADDR(priv->dev), qpn, &reg_id);
572 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
577 memcpy(entry->mac, IF_LLADDR(priv->dev), sizeof(entry->mac));
578 entry->reg_id = reg_id;
580 hlist_add_head(&entry->hlist,
581 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
586 mlx4_en_uc_steer_release(priv, IF_LLADDR(priv->dev), *qpn, reg_id);
589 mlx4_qp_release_range(dev, *qpn, 1);
592 mlx4_unregister_mac(dev, priv->port, mac);
596 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
598 struct mlx4_en_dev *mdev = priv->mdev;
599 struct mlx4_dev *dev = mdev->dev;
600 int qpn = priv->base_qpn;
603 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
604 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
605 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
606 IF_LLADDR(priv->dev));
607 mlx4_unregister_mac(dev, priv->port, mac);
609 struct mlx4_mac_entry *entry;
610 struct hlist_node *n, *tmp;
611 struct hlist_head *bucket;
614 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
615 bucket = &priv->mac_hash[i];
616 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
617 mac = mlx4_mac_to_u64(entry->mac);
618 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
620 mlx4_en_uc_steer_release(priv, entry->mac,
623 mlx4_unregister_mac(dev, priv->port, mac);
624 hlist_del(&entry->hlist);
629 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
631 mlx4_qp_release_range(dev, qpn, 1);
632 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
636 static void mlx4_en_clear_list(struct net_device *dev)
638 struct mlx4_en_priv *priv = netdev_priv(dev);
639 struct mlx4_en_mc_list *tmp, *mc_to_del;
641 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
642 list_del(&mc_to_del->list);
647 static void mlx4_en_cache_mclist(struct net_device *dev)
649 struct ifmultiaddr *ifma;
650 struct mlx4_en_mc_list *tmp;
651 struct mlx4_en_priv *priv = netdev_priv(dev);
653 TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
654 if (ifma->ifma_addr->sa_family != AF_LINK)
656 if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
659 /* Make sure the list didn't grow. */
660 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
662 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
663 list_add_tail(&tmp->list, &priv->mc_list);
667 static void update_mclist_flags(struct mlx4_en_priv *priv,
668 struct list_head *dst,
669 struct list_head *src)
671 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
674 /* Find all the entries that should be removed from dst,
675 * These are the entries that are not found in src
677 list_for_each_entry(dst_tmp, dst, list) {
679 list_for_each_entry(src_tmp, src, list) {
680 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
686 dst_tmp->action = MCLIST_REM;
689 /* Add entries that exist in src but not in dst
690 * mark them as need to add
692 list_for_each_entry(src_tmp, src, list) {
694 list_for_each_entry(dst_tmp, dst, list) {
695 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
696 dst_tmp->action = MCLIST_NONE;
702 new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
705 en_err(priv, "Failed to allocate current multicast list\n");
708 memcpy(new_mc, src_tmp,
709 sizeof(struct mlx4_en_mc_list));
710 new_mc->action = MCLIST_ADD;
711 list_add_tail(&new_mc->list, dst);
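/*
 * Example of the two passes above (illustrative addresses): with
 * dst = {A, B} (currently programmed) and src = {B, C} (freshly cached),
 * the first pass marks A as MCLIST_REM and leaves B untouched, and the
 * second pass marks B as MCLIST_NONE and appends C to dst as MCLIST_ADD.
 * The caller then only attaches/detaches the entries whose action is set.
 */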
716 static void mlx4_en_set_rx_mode(struct net_device *dev)
718 struct mlx4_en_priv *priv = netdev_priv(dev);
723 queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
726 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
727 struct mlx4_en_dev *mdev)
730 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
731 priv->flags |= MLX4_EN_FLAG_PROMISC;
733 /* Enable promiscuous mode */
734 switch (mdev->dev->caps.steering_mode) {
735 case MLX4_STEERING_MODE_DEVICE_MANAGED:
736 err = mlx4_flow_steer_promisc_add(mdev->dev,
739 MLX4_FS_ALL_DEFAULT);
741 en_err(priv, "Failed enabling promiscuous mode\n");
742 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
745 case MLX4_STEERING_MODE_B0:
746 err = mlx4_unicast_promisc_add(mdev->dev,
750 en_err(priv, "Failed enabling unicast promiscuous mode\n");
752 /* Add the default qp number as multicast
755 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
756 err = mlx4_multicast_promisc_add(mdev->dev,
760 en_err(priv, "Failed enabling multicast promiscuous mode\n");
761 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
765 case MLX4_STEERING_MODE_A0:
766 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
771 en_err(priv, "Failed enabling promiscuous mode\n");
775 /* Disable port multicast filter (unconditionally) */
776 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
777 0, MLX4_MCAST_DISABLE);
779 en_err(priv, "Failed disabling multicast filter\n");
783 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
784 struct mlx4_en_dev *mdev)
788 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
790 /* Disable promiscuous mode */
791 switch (mdev->dev->caps.steering_mode) {
792 case MLX4_STEERING_MODE_DEVICE_MANAGED:
793 err = mlx4_flow_steer_promisc_remove(mdev->dev,
795 MLX4_FS_ALL_DEFAULT);
797 en_err(priv, "Failed disabling promiscuous mode\n");
798 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
801 case MLX4_STEERING_MODE_B0:
802 err = mlx4_unicast_promisc_remove(mdev->dev,
806 en_err(priv, "Failed disabling unicast promiscuous mode\n");
807 /* Disable Multicast promisc */
808 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
809 err = mlx4_multicast_promisc_remove(mdev->dev,
813 en_err(priv, "Failed disabling multicast promiscuous mode\n");
814 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
818 case MLX4_STEERING_MODE_A0:
819 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
823 en_err(priv, "Failed disabling promiscuous mode\n");
828 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
829 struct net_device *dev,
830 struct mlx4_en_dev *mdev)
832 struct mlx4_en_mc_list *mclist, *tmp;
833 u8 mc_list[16] = {0};
838 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
839 if (dev->if_flags & IFF_ALLMULTI) {
840 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
841 0, MLX4_MCAST_DISABLE);
843 en_err(priv, "Failed disabling multicast filter\n");
845 /* Add the default qp number as multicast promisc */
846 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
847 switch (mdev->dev->caps.steering_mode) {
848 case MLX4_STEERING_MODE_DEVICE_MANAGED:
849 err = mlx4_flow_steer_promisc_add(mdev->dev,
855 case MLX4_STEERING_MODE_B0:
856 err = mlx4_multicast_promisc_add(mdev->dev,
861 case MLX4_STEERING_MODE_A0:
865 en_err(priv, "Failed entering multicast promisc mode\n");
866 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
869 /* Disable Multicast promisc */
870 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
871 switch (mdev->dev->caps.steering_mode) {
872 case MLX4_STEERING_MODE_DEVICE_MANAGED:
873 err = mlx4_flow_steer_promisc_remove(mdev->dev,
878 case MLX4_STEERING_MODE_B0:
879 err = mlx4_multicast_promisc_remove(mdev->dev,
884 case MLX4_STEERING_MODE_A0:
888 en_err(priv, "Failed disabling multicast promiscuous mode\n");
889 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
892 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
893 0, MLX4_MCAST_DISABLE);
895 en_err(priv, "Failed disabling multicast filter\n");
897 /* Flush mcast filter and init it with broadcast address */
898 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
899 1, MLX4_MCAST_CONFIG);
901 /* Update multicast list - we cache all addresses so they won't
902 * change while HW is updated holding the command semaphore */
903 mlx4_en_cache_mclist(dev);
904 list_for_each_entry(mclist, &priv->mc_list, list) {
905 mcast_addr = mlx4_mac_to_u64(mclist->addr);
906 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
907 mcast_addr, 0, MLX4_MCAST_CONFIG);
909 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
910 0, MLX4_MCAST_ENABLE);
912 en_err(priv, "Failed enabling multicast filter\n");
914 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
915 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
916 if (mclist->action == MCLIST_REM) {
917 /* detach this address and delete from list */
918 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
919 mc_list[5] = priv->port;
920 err = mlx4_multicast_detach(mdev->dev,
921 &priv->rss_map.indir_qp,
926 en_err(priv, "Failed to detach multicast address\n");
928 /* remove from list */
929 list_del(&mclist->list);
931 } else if (mclist->action == MCLIST_ADD) {
932 /* attach the address */
933 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
934 /* needed for B0 steering support */
935 mc_list[5] = priv->port;
936 err = mlx4_multicast_attach(mdev->dev,
937 &priv->rss_map.indir_qp,
943 en_err(priv, "Failed to attach multicast address\n");
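/*
 * Layout of the 16-byte mc_list[] GID used for the attach/detach calls in
 * this function: bytes 10..15 carry the Ethernet multicast MAC and byte 5
 * carries the port number, which B0 steering needs to scope the rule to the
 * right port; the remaining bytes stay zero (an all-0xff MAC, as used in
 * mlx4_en_start_port(), denotes broadcast).
 */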
950 static void mlx4_en_do_set_rx_mode(struct work_struct *work)
952 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
954 struct mlx4_en_dev *mdev = priv->mdev;
955 struct net_device *dev = priv->dev;
958 mutex_lock(&mdev->state_lock);
959 if (!mdev->device_up) {
960 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
963 if (!priv->port_up) {
964 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
967 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
968 if (priv->port_state.link_state) {
969 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
970 /* Important note: the following call to if_link_state_change
971 * is needed for interface up scenario (start port, link state
973 if_link_state_change(priv->dev, LINK_STATE_UP);
974 en_dbg(HW, priv, "Link Up\n");
978 /* Promiscuous mode: disable all filters */
979 if ((dev->if_flags & IFF_PROMISC) ||
980 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
981 mlx4_en_set_promisc_mode(priv, mdev);
985 /* Not in promiscuous mode */
986 if (priv->flags & MLX4_EN_FLAG_PROMISC)
987 mlx4_en_clear_promisc_mode(priv, mdev);
989 mlx4_en_do_multicast(priv, dev, mdev);
991 mutex_unlock(&mdev->state_lock);
994 #ifdef CONFIG_NET_POLL_CONTROLLER
995 static void mlx4_en_netpoll(struct net_device *dev)
997 struct mlx4_en_priv *priv = netdev_priv(dev);
998 struct mlx4_en_cq *cq;
1002 for (i = 0; i < priv->rx_ring_num; i++) {
1003 cq = priv->rx_cq[i];
1004 spin_lock_irqsave(&cq->lock, flags);
1005 napi_synchronize(&cq->napi);
1006 mlx4_en_process_rx_cq(dev, cq, 0);
1007 spin_unlock_irqrestore(&cq->lock, flags);
1012 static void mlx4_en_watchdog_timeout(void *arg)
1014 struct mlx4_en_priv *priv = arg;
1015 struct mlx4_en_dev *mdev = priv->mdev;
1017 en_dbg(DRV, priv, "Scheduling watchdog\n");
1018 queue_work(mdev->workqueue, &priv->watchdog_task);
1020 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
1021 mlx4_en_watchdog_timeout, priv);
1026 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1028 struct mlx4_en_cq *cq;
1031 /* If we haven't received a specific coalescing setting
1032 * (module param), we set the moderation parameters as follows:
1033 * - moder_cnt is set to the number of mtu sized packets to
1034 * satisfy our coalescing target.
1035 * - moder_time is set to a fixed value.
1037 priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->if_mtu + 1;
1038 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1039 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1040 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1041 en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
1042 "rx_frames:%d rx_usecs:%d\n",
1043 (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);
1045 /* Setup cq moderation params */
1046 for (i = 0; i < priv->rx_ring_num; i++) {
1047 cq = priv->rx_cq[i];
1048 cq->moder_cnt = priv->rx_frames;
1049 cq->moder_time = priv->rx_usecs;
1050 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1051 priv->last_moder_packets[i] = 0;
1052 priv->last_moder_bytes[i] = 0;
1055 for (i = 0; i < priv->tx_ring_num; i++) {
1056 cq = priv->tx_cq[i];
1057 cq->moder_cnt = priv->tx_frames;
1058 cq->moder_time = priv->tx_usecs;
1061 /* Reset auto-moderation params */
1062 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1063 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1064 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1065 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1066 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1067 priv->adaptive_rx_coal = 1;
1068 priv->last_moder_jiffies = 0;
1069 priv->last_moder_tx_packets = 0;
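/*
 * Worked example for the defaults above (assuming MLX4_EN_RX_COAL_TARGET is
 * a byte target of 0x20000; the value may differ between driver versions):
 * with a 1500-byte MTU, rx_frames = 0x20000 / 1500 + 1 = 88, i.e. an
 * interrupt is requested after roughly 128KB worth of MTU-sized packets or
 * after MLX4_EN_RX_COAL_TIME microseconds, whichever comes first.
 */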
1072 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1074 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1075 struct mlx4_en_cq *cq;
1076 unsigned long packets;
1078 unsigned long avg_pkt_size;
1079 unsigned long rx_packets;
1080 unsigned long rx_bytes;
1081 unsigned long rx_pkt_diff;
1085 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1088 for (ring = 0; ring < priv->rx_ring_num; ring++) {
1089 spin_lock(&priv->stats_lock);
1090 rx_packets = priv->rx_ring[ring]->packets;
1091 rx_bytes = priv->rx_ring[ring]->bytes;
1092 spin_unlock(&priv->stats_lock);
1094 rx_pkt_diff = ((unsigned long) (rx_packets -
1095 priv->last_moder_packets[ring]));
1096 packets = rx_pkt_diff;
1097 rate = packets * HZ / period;
1098 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1099 priv->last_moder_bytes[ring])) / packets : 0;
1101 /* Apply auto-moderation only when packet rate
1102 * exceeds a rate at which it matters */
1103 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1104 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1105 if (rate < priv->pkt_rate_low)
1106 moder_time = priv->rx_usecs_low;
1107 else if (rate > priv->pkt_rate_high)
1108 moder_time = priv->rx_usecs_high;
1110 moder_time = (rate - priv->pkt_rate_low) *
1111 (priv->rx_usecs_high - priv->rx_usecs_low) /
1112 (priv->pkt_rate_high - priv->pkt_rate_low) +
1115 moder_time = priv->rx_usecs_low;
1118 if (moder_time != priv->last_moder_time[ring]) {
1119 priv->last_moder_time[ring] = moder_time;
1120 cq = priv->rx_cq[ring];
1121 cq->moder_time = moder_time;
1122 err = mlx4_en_set_cq_moder(priv, cq);
1124 en_err(priv, "Failed modifying moderation for cq:%d\n",
1127 priv->last_moder_packets[ring] = rx_packets;
1128 priv->last_moder_bytes[ring] = rx_bytes;
1131 priv->last_moder_jiffies = jiffies;
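/*
 * The moderation time chosen above is a linear interpolation between the two
 * configured operating points:
 *
 *   moder_time = rx_usecs_low +
 *                (rate - pkt_rate_low) * (rx_usecs_high - rx_usecs_low) /
 *                (pkt_rate_high - pkt_rate_low)
 *
 * clamped to rx_usecs_low below pkt_rate_low and to rx_usecs_high above
 * pkt_rate_high, and only applied when the ring's packet rate and average
 * packet size make moderation worthwhile.
 */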
1134 static void mlx4_en_do_get_stats(struct work_struct *work)
1136 struct delayed_work *delay = to_delayed_work(work);
1137 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1139 struct mlx4_en_dev *mdev = priv->mdev;
1142 mutex_lock(&mdev->state_lock);
1143 if (mdev->device_up) {
1144 if (priv->port_up) {
1145 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1147 en_dbg(HW, priv, "Could not update stats\n");
1149 mlx4_en_auto_moderation(priv);
1152 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1154 mutex_unlock(&mdev->state_lock);
1157 /* mlx4_en_service_task - Run service task for tasks that need to be done
1160 static void mlx4_en_service_task(struct work_struct *work)
1162 struct delayed_work *delay = to_delayed_work(work);
1163 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1165 struct mlx4_en_dev *mdev = priv->mdev;
1167 mutex_lock(&mdev->state_lock);
1168 if (mdev->device_up) {
1169 queue_delayed_work(mdev->workqueue, &priv->service_task,
1170 SERVICE_TASK_DELAY);
1172 mutex_unlock(&mdev->state_lock);
1175 static void mlx4_en_linkstate(struct work_struct *work)
1177 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1179 struct mlx4_en_dev *mdev = priv->mdev;
1180 int linkstate = priv->link_state;
1182 mutex_lock(&mdev->state_lock);
1183 /* If observable port state changed set carrier state and
1184 * report to system log */
1185 if (priv->last_link_state != linkstate) {
1186 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1187 en_info(priv, "Link Down\n");
1188 if_link_state_change(priv->dev, LINK_STATE_DOWN);
1189 /* make sure the port is up before notifying the OS.
1190 * This is tricky since we get here on INIT_PORT and
1191 * in such a case we can't tell the OS the port is up.
1192 * To solve this there is a call to if_link_state_change
1195 } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){
1196 en_info(priv, "Link Up\n");
1197 if_link_state_change(priv->dev, LINK_STATE_UP);
1200 priv->last_link_state = linkstate;
1201 mutex_unlock(&mdev->state_lock);
1205 int mlx4_en_start_port(struct net_device *dev)
1207 struct mlx4_en_priv *priv = netdev_priv(dev);
1208 struct mlx4_en_dev *mdev = priv->mdev;
1209 struct mlx4_en_cq *cq;
1210 struct mlx4_en_tx_ring *tx_ring;
1216 u8 mc_list[16] = {0};
1219 if (priv->port_up) {
1220 en_dbg(DRV, priv, "start port called while port already up\n");
1224 INIT_LIST_HEAD(&priv->mc_list);
1225 INIT_LIST_HEAD(&priv->curr_list);
1226 INIT_LIST_HEAD(&priv->ethtool_list);
1228 /* Calculate Rx buf size */
1229 dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
1230 mlx4_en_calc_rx_buf(dev);
1231 priv->rx_alloc_size = max_t(int, 2 * roundup_pow_of_two(priv->rx_mb_size),
1233 priv->rx_alloc_order = get_order(priv->rx_alloc_size);
1234 priv->rx_buf_size = roundup_pow_of_two(priv->rx_mb_size);
1235 priv->log_rx_info = ROUNDUP_LOG2(sizeof(struct mlx4_en_rx_buf));
1236 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);
1238 /* Configure rx cq's and rings */
1239 err = mlx4_en_activate_rx_rings(priv);
1241 en_err(priv, "Failed to activate RX rings\n");
1244 for (i = 0; i < priv->rx_ring_num; i++) {
1245 cq = priv->rx_cq[i];
1247 mlx4_en_cq_init_lock(cq);
1248 err = mlx4_en_activate_cq(priv, cq, i);
1250 en_err(priv, "Failed activating Rx CQ\n");
1253 for (j = 0; j < cq->size; j++)
1254 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1255 err = mlx4_en_set_cq_moder(priv, cq);
1257 en_err(priv, "Failed setting cq moderation parameters");
1258 mlx4_en_deactivate_cq(priv, cq);
1261 mlx4_en_arm_cq(priv, cq);
1262 priv->rx_ring[i]->cqn = cq->mcq.cqn;
1267 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1268 err = mlx4_en_get_qp(priv);
1270 en_err(priv, "Failed getting eth qp\n");
1273 mdev->mac_removed[priv->port] = 0;
1275 /* gets default allocated counter index from func cap */
1276 /* or sink counter index if no resources */
1277 priv->counter_index = mdev->dev->caps.def_counter_index[priv->port - 1];
1279 en_dbg(DRV, priv, "%s: default counter index %d for port %d\n",
1280 __func__, priv->counter_index, priv->port);
1282 err = mlx4_en_config_rss_steer(priv);
1284 en_err(priv, "Failed configuring rss steering\n");
1288 err = mlx4_en_create_drop_qp(priv);
1292 /* Configure tx cq's and rings */
1293 for (i = 0; i < priv->tx_ring_num; i++) {
1295 cq = priv->tx_cq[i];
1296 err = mlx4_en_activate_cq(priv, cq, i);
1298 en_err(priv, "Failed allocating Tx CQ\n");
1301 err = mlx4_en_set_cq_moder(priv, cq);
1303 en_err(priv, "Failed setting cq moderation parameters");
1304 mlx4_en_deactivate_cq(priv, cq);
1307 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1308 cq->buf->wqe_index = cpu_to_be16(0xffff);
1310 /* Configure ring */
1311 tx_ring = priv->tx_ring[i];
1313 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1314 i / priv->num_tx_rings_p_up);
1316 en_err(priv, "Failed allocating Tx ring\n");
1317 mlx4_en_deactivate_cq(priv, cq);
1321 /* Arm CQ for TX completions */
1322 mlx4_en_arm_cq(priv, cq);
1324 /* Set initial ownership of all Tx TXBBs to SW (1) */
1325 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1326 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
1330 /* Configure port */
1331 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1333 priv->prof->tx_pause,
1335 priv->prof->rx_pause,
1336 priv->prof->rx_ppp);
1338 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1342 /* Set default qp number */
1343 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1345 en_err(priv, "Failed setting default qp numbers\n");
1350 en_dbg(HW, priv, "Initializing port\n");
1351 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1353 en_err(priv, "Failed Initializing port\n");
1357 /* Attach rx QP to broadcast address */
1358 memset(&mc_list[10], 0xff, ETH_ALEN);
1359 mc_list[5] = priv->port; /* needed for B0 steering support */
1360 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1361 priv->port, 0, MLX4_PROT_ETH,
1362 &priv->broadcast_id))
1363 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1365 /* Must redo promiscuous mode setup. */
1366 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1368 /* Schedule multicast task to populate multicast list */
1369 queue_work(mdev->workqueue, &priv->rx_mode_task);
1371 mlx4_set_stats_bitmap(mdev->dev, priv->stats_bitmap);
1373 priv->port_up = true;
1375 /* Enable the queues. */
1376 dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
1377 dev->if_drv_flags |= IFF_DRV_RUNNING;
1378 #ifdef CONFIG_DEBUG_FS
1379 mlx4_en_create_debug_files(priv);
1381 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
1382 mlx4_en_watchdog_timeout, priv);
1388 while (tx_index--) {
1389 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1390 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
1392 mlx4_en_destroy_drop_qp(priv);
1394 mlx4_en_release_rss_steer(priv);
1396 mlx4_en_put_qp(priv);
1399 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1400 for (i = 0; i < priv->rx_ring_num; i++)
1401 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1403 return err; /* need to close devices */
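/*
 * Bring-up order used by mlx4_en_start_port() above: activate the RX CQs and
 * rings, acquire the base QP and MAC steering entry, configure RSS and the
 * drop QP, activate the TX CQs and rings, program the port (SET_PORT general
 * plus the default QP), INIT_PORT, attach the broadcast GID, and finally
 * schedule the rx_mode task to (re)program promiscuous/multicast state.  The
 * error labels unwind in the reverse order.
 */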
1407 void mlx4_en_stop_port(struct net_device *dev)
1409 struct mlx4_en_priv *priv = netdev_priv(dev);
1410 struct mlx4_en_dev *mdev = priv->mdev;
1411 struct mlx4_en_mc_list *mclist, *tmp;
1413 u8 mc_list[16] = {0};
1415 if (!priv->port_up) {
1416 en_dbg(DRV, priv, "stop port called while port already down\n");
1420 #ifdef CONFIG_DEBUG_FS
1421 mlx4_en_delete_debug_files(priv);
1425 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1427 /* Set port as not active */
1428 priv->port_up = false;
1429 if (priv->counter_index != 0xff) {
1430 mlx4_counter_free(mdev->dev, priv->port, priv->counter_index);
1431 priv->counter_index = 0xff;
1434 /* Promiscuous mode */
1435 if (mdev->dev->caps.steering_mode ==
1436 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1437 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1438 MLX4_EN_FLAG_MC_PROMISC);
1439 mlx4_flow_steer_promisc_remove(mdev->dev,
1441 MLX4_FS_ALL_DEFAULT);
1442 mlx4_flow_steer_promisc_remove(mdev->dev,
1444 MLX4_FS_MC_DEFAULT);
1445 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1446 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1448 /* Disable promiscuous mode */
1449 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1452 /* Disable Multicast promisc */
1453 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1454 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1456 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1460 /* Detach All multicasts */
1461 memset(&mc_list[10], 0xff, ETH_ALEN);
1462 mc_list[5] = priv->port; /* needed for B0 steering support */
1463 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1464 MLX4_PROT_ETH, priv->broadcast_id);
1465 list_for_each_entry(mclist, &priv->curr_list, list) {
1466 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1467 mc_list[5] = priv->port;
1468 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1469 mc_list, MLX4_PROT_ETH, mclist->reg_id);
1471 mlx4_en_clear_list(dev);
1472 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1473 list_del(&mclist->list);
1477 /* Flush multicast filter */
1478 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1479 mlx4_en_destroy_drop_qp(priv);
1482 for (i = 0; i < priv->tx_ring_num; i++) {
1483 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1484 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
1488 for (i = 0; i < priv->tx_ring_num; i++)
1489 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1492 mlx4_en_release_rss_steer(priv);
1494 /* Unregister Mac address for the port */
1495 mlx4_en_put_qp(priv);
1496 mdev->mac_removed[priv->port] = 1;
1499 for (i = 0; i < priv->rx_ring_num; i++) {
1500 struct mlx4_en_cq *cq = priv->rx_cq[i];
1501 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1502 mlx4_en_deactivate_cq(priv, cq);
1505 callout_stop(&priv->watchdog_timer);
1507 dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1510 static void mlx4_en_restart(struct work_struct *work)
1512 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1514 struct mlx4_en_dev *mdev = priv->mdev;
1515 struct net_device *dev = priv->dev;
1516 struct mlx4_en_tx_ring *ring;
1520 if (priv->blocked == 0 || priv->port_up == 0)
1522 for (i = 0; i < priv->tx_ring_num; i++) {
1523 ring = priv->tx_ring[i];
1524 if (ring->blocked &&
1525 ring->watchdog_time + MLX4_EN_WATCHDOG_TIMEOUT < ticks)
1531 priv->port_stats.tx_timeout++;
1532 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1534 mutex_lock(&mdev->state_lock);
1535 if (priv->port_up) {
1536 mlx4_en_stop_port(dev);
1537 //for (i = 0; i < priv->tx_ring_num; i++)
1538 // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
1539 if (mlx4_en_start_port(dev))
1540 en_err(priv, "Failed restarting port %d\n", priv->port);
1542 mutex_unlock(&mdev->state_lock);
1545 static void mlx4_en_clear_stats(struct net_device *dev)
1547 struct mlx4_en_priv *priv = netdev_priv(dev);
1548 struct mlx4_en_dev *mdev = priv->mdev;
1551 if (!mlx4_is_slave(mdev->dev))
1552 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1553 en_dbg(HW, priv, "Failed dumping statistics\n");
1555 memset(&priv->pstats, 0, sizeof(priv->pstats));
1556 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1557 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1558 memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));
1560 for (i = 0; i < priv->tx_ring_num; i++) {
1561 priv->tx_ring[i]->bytes = 0;
1562 priv->tx_ring[i]->packets = 0;
1563 priv->tx_ring[i]->tx_csum = 0;
1565 for (i = 0; i < priv->rx_ring_num; i++) {
1566 priv->rx_ring[i]->bytes = 0;
1567 priv->rx_ring[i]->packets = 0;
1568 priv->rx_ring[i]->csum_ok = 0;
1569 priv->rx_ring[i]->csum_none = 0;
1573 static void mlx4_en_open(void* arg)
1576 struct mlx4_en_priv *priv;
1577 struct mlx4_en_dev *mdev;
1578 struct net_device *dev;
1586 mutex_lock(&mdev->state_lock);
1588 if (!mdev->device_up) {
1589 en_err(priv, "Cannot open - device down/disabled\n");
1593 /* Reset HW statistics and SW counters */
1594 mlx4_en_clear_stats(dev);
1596 err = mlx4_en_start_port(dev);
1598 en_err(priv, "Failed starting port:%d\n", priv->port);
1601 mutex_unlock(&mdev->state_lock);
1605 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1609 #ifdef CONFIG_RFS_ACCEL
1610 if (priv->dev->rx_cpu_rmap) {
1611 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1612 priv->dev->rx_cpu_rmap = NULL;
1616 for (i = 0; i < priv->tx_ring_num; i++) {
1617 if (priv->tx_ring && priv->tx_ring[i])
1618 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1619 if (priv->tx_cq && priv->tx_cq[i])
1620 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1623 for (i = 0; i < priv->rx_ring_num; i++) {
1624 if (priv->rx_ring[i])
1625 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1626 priv->prof->rx_ring_size, priv->stride);
1628 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1632 sysctl_ctx_free(&priv->stat_ctx);
1637 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1639 struct mlx4_en_port_profile *prof = priv->prof;
1643 /* Create rx Rings */
1644 for (i = 0; i < priv->rx_ring_num; i++) {
1645 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1646 prof->rx_ring_size, i, RX, node))
1649 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
1650 prof->rx_ring_size, node))
1654 /* Create tx Rings */
1655 for (i = 0; i < priv->tx_ring_num; i++) {
1656 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1657 prof->tx_ring_size, i, TX, node))
1660 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
1661 prof->tx_ring_size, TXBB_SIZE, node, i))
1665 #ifdef CONFIG_RFS_ACCEL
1666 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
1667 if (!priv->dev->rx_cpu_rmap)
1670 /* Re-create stat sysctls in case the number of rings changed. */
1671 mlx4_en_sysctl_stat(priv);
1675 en_err(priv, "Failed to allocate NIC resources\n");
1676 for (i = 0; i < priv->rx_ring_num; i++) {
1677 if (priv->rx_ring[i])
1678 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1682 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1684 for (i = 0; i < priv->tx_ring_num; i++) {
1685 if (priv->tx_ring[i])
1686 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1688 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1690 priv->port_up = false;
1694 struct en_port_attribute {
1695 struct attribute attr;
1696 ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
1697 ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
1700 #define PORT_ATTR_RO(_name) \
1701 struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)
1703 #define EN_PORT_ATTR(_name, _mode, _show, _store) \
1704 struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)
1706 void mlx4_en_destroy_netdev(struct net_device *dev)
1708 struct mlx4_en_priv *priv = netdev_priv(dev);
1709 struct mlx4_en_dev *mdev = priv->mdev;
1711 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
1713 if (priv->vlan_attach != NULL)
1714 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
1715 if (priv->vlan_detach != NULL)
1716 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
1718 /* Unregister device - this will close the port if it was up */
1719 if (priv->registered)
1720 ether_ifdetach(dev);
1722 if (priv->allocated)
1723 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
1725 mutex_lock(&mdev->state_lock);
1726 mlx4_en_stop_port(dev);
1727 mutex_unlock(&mdev->state_lock);
1730 cancel_delayed_work(&priv->stats_task);
1731 cancel_delayed_work(&priv->service_task);
1732 /* flush any pending task for this netdev */
1733 flush_workqueue(mdev->workqueue);
1734 callout_drain(&priv->watchdog_timer);
1736 /* Detach the netdev so tasks would not attempt to access it */
1737 mutex_lock(&mdev->state_lock);
1738 mdev->pndev[priv->port] = NULL;
1739 mutex_unlock(&mdev->state_lock);
1742 mlx4_en_free_resources(priv);
1744 /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
1746 sysctl_ctx_free(&priv->conf_ctx);
1748 kfree(priv->tx_ring);
1756 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
1758 struct mlx4_en_priv *priv = netdev_priv(dev);
1759 struct mlx4_en_dev *mdev = priv->mdev;
1762 en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
1763 (unsigned)dev->if_mtu, (unsigned)new_mtu);
1765 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
1766 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
1769 mutex_lock(&mdev->state_lock);
1770 dev->if_mtu = new_mtu;
1771 if (dev->if_drv_flags & IFF_DRV_RUNNING) {
1772 if (!mdev->device_up) {
1773 /* NIC is probably restarting - let watchdog task reset
1775 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
1777 mlx4_en_stop_port(dev);
1778 err = mlx4_en_start_port(dev);
1780 en_err(priv, "Failed restarting port:%d\n",
1782 queue_work(mdev->workqueue, &priv->watchdog_task);
1786 mutex_unlock(&mdev->state_lock);
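/*
 * Note on the MTU change above: when the interface is running, the port is
 * torn down and restarted with the new MTU so that the RX buffer sizes are
 * recalculated; if the restart fails, the watchdog task is queued to retry
 * the recovery asynchronously.
 */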
1790 static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
1796 if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
1799 * [ShaharK] mlx4_en_QUERY_PORT sleeps and cannot be called under a
1800 * non-sleepable lock.
1801 * I moved it to the periodic mlx4_en_do_get_stats.
1802 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
1806 trans_type = priv->port_state.transciver;
1807 /* XXX I don't know all of the transceiver values. */
1808 switch (priv->port_state.link_speed) {
1810 active |= IFM_1000_T;
1813 if (trans_type > 0 && trans_type <= 0xC)
1814 active |= IFM_10G_SR;
1815 else if (trans_type == 0x80 || trans_type == 0)
1816 active |= IFM_10G_CX4;
1819 active |= IFM_40G_CR4;
1822 if (priv->prof->tx_pause)
1823 active |= IFM_ETH_TXPAUSE;
1824 if (priv->prof->rx_pause)
1825 active |= IFM_ETH_RXPAUSE;
1830 static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
1832 struct mlx4_en_priv *priv;
1834 priv = dev->if_softc;
1835 ifmr->ifm_status = IFM_AVALID;
1836 if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
1837 ifmr->ifm_status |= IFM_ACTIVE;
1838 ifmr->ifm_active = mlx4_en_calc_media(priv);
1843 static int mlx4_en_media_change(struct ifnet *dev)
1845 struct mlx4_en_priv *priv;
1846 struct ifmedia *ifm;
1851 priv = dev->if_softc;
1853 rxpause = txpause = 0;
1856 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1858 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1865 if ((IFM_SUBTYPE(ifm->ifm_media)
1866 == IFM_SUBTYPE(mlx4_en_calc_media(priv)))
1867 && (ifm->ifm_media & IFM_FDX))
1871 printf("%s: Only auto media type\n", if_name(dev));
1874 /* Allow user to set/clear pause */
1875 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
1877 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
1879 if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
1880 priv->prof->tx_pause = txpause;
1881 priv->prof->rx_pause = rxpause;
1882 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
1883 priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
1884 priv->prof->tx_ppp, priv->prof->rx_pause,
1885 priv->prof->rx_ppp);
1890 static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
1892 struct mlx4_en_priv *priv;
1893 struct mlx4_en_dev *mdev;
1900 priv = dev->if_softc;
1902 ifr = (struct ifreq *) data;
1906 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
1909 mutex_lock(&mdev->state_lock);
1910 if (dev->if_flags & IFF_UP) {
1911 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
1912 mlx4_en_start_port(dev);
1914 mlx4_en_set_rx_mode(dev);
1916 if (dev->if_drv_flags & IFF_DRV_RUNNING) {
1917 mlx4_en_stop_port(dev);
1918 if_link_state_change(dev, LINK_STATE_DOWN);
1921 mutex_unlock(&mdev->state_lock);
1925 mlx4_en_set_rx_mode(dev);
1929 error = ifmedia_ioctl(dev, ifr, &priv->media, command);
1932 mutex_lock(&mdev->state_lock);
1933 mask = ifr->ifr_reqcap ^ dev->if_capenable;
1934 if (mask & IFCAP_HWCSUM)
1935 dev->if_capenable ^= IFCAP_HWCSUM;
1936 if (mask & IFCAP_TSO4)
1937 dev->if_capenable ^= IFCAP_TSO4;
1938 if (mask & IFCAP_TSO6)
1939 dev->if_capenable ^= IFCAP_TSO6;
1940 if (mask & IFCAP_LRO)
1941 dev->if_capenable ^= IFCAP_LRO;
1942 if (mask & IFCAP_VLAN_HWTAGGING)
1943 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1944 if (mask & IFCAP_VLAN_HWFILTER)
1945 dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
1946 if (mask & IFCAP_WOL_MAGIC)
1947 dev->if_capenable ^= IFCAP_WOL_MAGIC;
1948 if (dev->if_drv_flags & IFF_DRV_RUNNING)
1949 mlx4_en_start_port(dev);
1950 mutex_unlock(&mdev->state_lock);
1951 VLAN_CAPABILITIES(dev);
1954 error = ether_ioctl(dev, command, data);
1962 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1963 struct mlx4_en_port_profile *prof)
1965 struct net_device *dev;
1966 struct mlx4_en_priv *priv;
1967 uint8_t dev_addr[ETHER_ADDR_LEN];
1971 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1972 dev = priv->dev = if_alloc(IFT_ETHER);
1974 en_err(priv, "Net device allocation failed\n");
1978 dev->if_softc = priv;
1979 if_initname(dev, "mlxen", atomic_fetchadd_int(&mlx4_en_unit, 1));
1980 dev->if_mtu = ETHERMTU;
1981 dev->if_baudrate = 1000000000;
1982 dev->if_init = mlx4_en_open;
1983 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1984 dev->if_ioctl = mlx4_en_ioctl;
1985 dev->if_transmit = mlx4_en_transmit;
1986 dev->if_qflush = mlx4_en_qflush;
1987 dev->if_snd.ifq_maxlen = prof->tx_ring_size;
1990 * Initialize driver private data
1992 priv->counter_index = 0xff;
1993 spin_lock_init(&priv->stats_lock);
1994 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
1995 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
1996 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
1997 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
1998 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
1999 callout_init(&priv->watchdog_timer, 1);
2000 #ifdef CONFIG_RFS_ACCEL
2001 INIT_LIST_HEAD(&priv->filters);
2002 spin_lock_init(&priv->filters_lock);
2005 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2008 priv->ddev = &mdev->pdev->dev;
2011 priv->port_up = false;
2012 priv->flags = prof->flags;
2013 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2014 MLX4_WQE_CTRL_SOLICITED);
2016 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2017 priv->tx_ring_num = prof->tx_ring_num;
2018 priv->tx_ring = kcalloc(MAX_TX_RINGS,
2019 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
2020 if (!priv->tx_ring) {
2024 priv->tx_cq = kcalloc(MAX_TX_RINGS, sizeof(struct mlx4_en_cq *),
2031 priv->rx_ring_num = prof->rx_ring_num;
2032 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2033 priv->mac_index = -1;
2034 priv->last_ifq_jiffies = 0;
2035 priv->if_counters_rx_errors = 0;
2036 priv->if_counters_rx_no_buffer = 0;
2037 #ifdef CONFIG_MLX4_EN_DCB
2038 if (!mlx4_is_slave(priv->mdev->dev)) {
2039 priv->dcbx_cap = DCB_CAP_DCBX_HOST;
2040 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
2041 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2042 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2044 en_info(priv, "QoS disabled - no HW support\n");
2045 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2050 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2051 INIT_HLIST_HEAD(&priv->mac_hash[i]);
2054 /* Query for default mac and max mtu */
2055 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2056 priv->mac = mdev->dev->caps.def_mac[priv->port];
2057 if (ILLEGAL_MAC(priv->mac)) {
2058 #if BITS_PER_LONG == 64
2059 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
2060 priv->port, priv->mac);
2061 #elif BITS_PER_LONG == 32
2062 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
2063 priv->port, priv->mac);
2071 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2074 mlx4_en_sysctl_conf(priv);
2076 err = mlx4_en_alloc_resources(priv);
2080 /* Allocate page for receive rings */
2081 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2082 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2084 en_err(priv, "Failed to allocate page for rx qps\n");
2087 priv->allocated = 1;
2090 * Set driver features
2092 dev->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
2093 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
2094 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
2095 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
2096 dev->if_capabilities |= IFCAP_LRO;
2098 if (mdev->LSO_support)
2099 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;
2101 /* set TSO limits so that we don't have to drop TX packets */
2102 dev->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2103 dev->if_hw_tsomaxsegcount = 16;
2104 dev->if_hw_tsomaxsegsize = 65536; /* XXX can do up to 4GByte */
2106 dev->if_capenable = dev->if_capabilities;
2108 dev->if_hwassist = 0;
2109 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
2110 dev->if_hwassist |= CSUM_TSO;
2111 if (dev->if_capenable & IFCAP_TXCSUM)
2112 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2115 /* Register for VLAN events */
2116 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
2117 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
2118 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
2119 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
2121 mdev->pndev[priv->port] = dev;
2123 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
2124 mlx4_en_set_default_moderation(priv);
2126 /* Set default MAC */
2127 for (i = 0; i < ETHER_ADDR_LEN; i++)
2128 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));
2131 ether_ifattach(dev, dev_addr);
2132 if_link_state_change(dev, LINK_STATE_DOWN);
2133 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
2134 mlx4_en_media_change, mlx4_en_media_status);
2135 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
2136 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
2137 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
2138 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
2139 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2140 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
2142 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2143 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2145 priv->registered = 1;
2151 priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
2152 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2154 prof->tx_pause, prof->tx_ppp,
2155 prof->rx_pause, prof->rx_ppp);
2157 en_err(priv, "Failed setting port general configurations "
2158 "for port %d, with error %d\n", priv->port, err);
2163 en_warn(priv, "Initializing port\n");
2164 err = mlx4_INIT_PORT(mdev->dev, priv->port);
2166 en_err(priv, "Failed Initializing port\n");
2170 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2172 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2173 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);
2180 mlx4_en_destroy_netdev(dev);
2183 static int mlx4_en_set_ring_size(struct net_device *dev,
2184 int rx_size, int tx_size)
2186 struct mlx4_en_priv *priv = netdev_priv(dev);
2187 struct mlx4_en_dev *mdev = priv->mdev;
2191 rx_size = roundup_pow_of_two(rx_size);
2192 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
2193 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
2194 tx_size = roundup_pow_of_two(tx_size);
2195 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
2196 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
2198 if (rx_size == (priv->port_up ?
2199 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
2200 tx_size == priv->tx_ring[0]->size)
2202 mutex_lock(&mdev->state_lock);
2203 if (priv->port_up) {
2205 mlx4_en_stop_port(dev);
2207 mlx4_en_free_resources(priv);
2208 priv->prof->tx_ring_size = tx_size;
2209 priv->prof->rx_ring_size = rx_size;
2210 err = mlx4_en_alloc_resources(priv);
2212 en_err(priv, "Failed reallocating port resources\n");
2216 err = mlx4_en_start_port(dev);
2218 en_err(priv, "Failed starting port\n");
2221 mutex_unlock(&mdev->state_lock);
2224 static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
2226 struct mlx4_en_priv *priv;
2231 size = priv->prof->rx_ring_size;
2232 error = sysctl_handle_int(oidp, &size, 0, req);
2233 if (error || !req->newptr)
2235 error = -mlx4_en_set_ring_size(priv->dev, size,
2236 priv->prof->tx_ring_size);
2240 static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
2242 struct mlx4_en_priv *priv;
2247 size = priv->prof->tx_ring_size;
2248 error = sysctl_handle_int(oidp, &size, 0, req);
2249 if (error || !req->newptr)
2251 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
2257 static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
2259 struct mlx4_en_priv *priv;
2264 ppp = priv->prof->tx_ppp;
2265 error = sysctl_handle_int(oidp, &ppp, 0, req);
2266 if (error || !req->newptr)
2268 if (ppp > 0xff || ppp < 0)
2270 priv->prof->tx_ppp = ppp;
2271 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
2272 priv->rx_mb_size + ETHER_CRC_LEN,
2273 priv->prof->tx_pause,
2275 priv->prof->rx_pause,
2276 priv->prof->rx_ppp);
2281 static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
2283 struct mlx4_en_priv *priv;
2284 struct mlx4_en_dev *mdev;
2292 ppp = priv->prof->rx_ppp;
2293 error = sysctl_handle_int(oidp, &ppp, 0, req);
2294 if (error || !req->newptr)
2296 if (ppp > 0xff || ppp < 0)
2298 /* See if we have to change the number of tx queues. */
2299 if (!ppp != !priv->prof->rx_ppp) {
2300 mutex_lock(&mdev->state_lock);
2301 if (priv->port_up) {
2303 mlx4_en_stop_port(priv->dev);
2305 mlx4_en_free_resources(priv);
2306 priv->prof->rx_ppp = ppp;
2307 error = -mlx4_en_alloc_resources(priv);
2309 en_err(priv, "Failed reallocating port resources\n");
2310 if (error == 0 && port_up) {
2311 error = -mlx4_en_start_port(priv->dev);
2313 en_err(priv, "Failed starting port\n");
2315 mutex_unlock(&mdev->state_lock);
2319 priv->prof->rx_ppp = ppp;
2320 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
2321 priv->rx_mb_size + ETHER_CRC_LEN,
2322 priv->prof->tx_pause,
2324 priv->prof->rx_pause,
2325 priv->prof->rx_ppp);
2330 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
2332 struct net_device *dev;
2333 struct sysctl_ctx_list *ctx;
2334 struct sysctl_oid *node;
2335 struct sysctl_oid_list *node_list;
2336 struct sysctl_oid *coal;
2337 struct sysctl_oid_list *coal_list;
2338 const char *pnameunit;
2341 ctx = &priv->conf_ctx;
2342 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);
2344 sysctl_ctx_init(ctx);
2345 priv->sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
2346 OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet");
2347 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
2348 "conf", CTLFLAG_RD, NULL, "Configuration");
2349 node_list = SYSCTL_CHILDREN(node);
2351 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
2352 CTLFLAG_RW, &priv->msg_enable, 0,
2353 "Driver message enable bitfield");
2354 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
2355 CTLFLAG_RD, &priv->rx_ring_num, 0,
2356 "Number of receive rings");
2357 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
2358 CTLFLAG_RD, &priv->tx_ring_num, 0,
2359 "Number of transmit rings");
2360 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
2361 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2362 mlx4_en_set_rx_ring_size, "I", "Receive ring size");
2363 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
2364 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2365 mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
2366 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
2367 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2368 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
2369 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
2370 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2371 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
2372 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
2373 CTLFLAG_RD, &priv->port, 0,
2375 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
2376 CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
2379 /* Add coalescer configuration. */
2380 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
2381 "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
2382 coal_list = SYSCTL_CHILDREN(coal);
2383 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
2384 CTLFLAG_RW, &priv->pkt_rate_low, 0,
2385 "Packets per-second for minimum delay");
2386 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
2387 CTLFLAG_RW, &priv->rx_usecs_low, 0,
2388 "Minimum RX delay in micro-seconds");
2389 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
2390 CTLFLAG_RW, &priv->pkt_rate_high, 0,
2391 "Packets per-second for maximum delay");
2392 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
2393 CTLFLAG_RW, &priv->rx_usecs_high, 0,
2394 "Maximum RX delay in micro-seconds");
2395 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
2396 CTLFLAG_RW, &priv->sample_interval, 0,
2397 "adaptive frequency in units of HZ ticks");
2398 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
2399 CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
2400 "Enable adaptive rx coalescing");
2404 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
2406 struct net_device *dev;
2407 struct sysctl_ctx_list *ctx;
2408 struct sysctl_oid *node;
2409 struct sysctl_oid_list *node_list;
2410 struct sysctl_oid *ring_node;
2411 struct sysctl_oid_list *ring_list;
2412 struct mlx4_en_tx_ring *tx_ring;
2413 struct mlx4_en_rx_ring *rx_ring;
2419 ctx = &priv->stat_ctx;
2420 sysctl_ctx_init(ctx);
2421 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
2422 "stat", CTLFLAG_RD, NULL, "Statistics");
2423 node_list = SYSCTL_CHILDREN(node);
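/* Optional performance counters, compiled in only when MLX4_EN_PERF_STAT is defined. */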
2425 #ifdef MLX4_EN_PERF_STAT
2426 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
2427 &priv->pstats.tx_poll, "TX Poll calls");
2428 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
2429 &priv->pstats.tx_pktsz_avg, "TX average packet size");
2430 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
2431 &priv->pstats.inflight_avg, "TX average packets in-flight");
2432 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
2433 &priv->pstats.tx_coal_avg, "TX average coalesced completions");
2434 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
2435 &priv->pstats.rx_coal_avg, "RX average coalesced completions");
2438 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
2439 &priv->port_stats.tso_packets, "TSO packets sent");
2440 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
2441 &priv->port_stats.queue_stopped, "Queue full");
2442 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
2443 &priv->port_stats.wake_queue, "Queue resumed after full");
2444 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
2445 &priv->port_stats.tx_timeout, "Transmit timeouts");
2446 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
2447 &priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf");
2448 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
2449 &priv->port_stats.rx_chksum_good, "RX checksum offload success");
2450 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
2451 &priv->port_stats.rx_chksum_none, "RX without checksum offload");
2452 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload",
2453 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload,
2454 "TX checksum offloads");
2456 /* Could strdup the names and add in a loop. This is simpler. */
2457 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
2458 &priv->pkstats.rx_bytes, "RX Bytes");
2459 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
2460 &priv->pkstats.rx_packets, "RX packets");
2461 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
2462 &priv->pkstats.rx_multicast_packets, "RX Multicast Packets");
2463 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
2464 &priv->pkstats.rx_broadcast_packets, "RX Broadcast Packets");
2465 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
2466 &priv->pkstats.rx_errors, "RX Errors");
2467 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
2468 &priv->pkstats.rx_dropped, "RX Dropped");
2469 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
2470 &priv->pkstats.rx_length_errors, "RX Length Errors");
2471 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
2472 &priv->pkstats.rx_over_errors, "RX Over Errors");
2473 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
2474 &priv->pkstats.rx_crc_errors, "RX CRC Errors");
2475 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
2476 &priv->pkstats.rx_jabbers, "RX Jabbers");
2479 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
2480 &priv->pkstats.rx_in_range_length_error, "RX IN_Range Length Error");
2481 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
2482 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error,
2483 "RX Out Range Length Error");
2484 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
2485 &priv->pkstats.rx_lt_64_bytes_packets, "RX Lt 64 Bytes Packets");
2486 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
2487 &priv->pkstats.rx_127_bytes_packets, "RX 127 bytes Packets");
2488 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
2489 &priv->pkstats.rx_255_bytes_packets, "RX 255 bytes Packets");
2490 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
2491 &priv->pkstats.rx_511_bytes_packets, "RX 511 bytes Packets");
2492 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
2493 &priv->pkstats.rx_1023_bytes_packets, "RX 1023 bytes Packets");
2494 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
2495 &priv->pkstats.rx_1518_bytes_packets, "RX 1518 bytes Packets");
2496 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
2497 &priv->pkstats.rx_1522_bytes_packets, "RX 1522 bytes Packets");
2498 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
2499 &priv->pkstats.rx_1548_bytes_packets, "RX 1548 bytes Packets");
2500 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
2501 &priv->pkstats.rx_gt_1548_bytes_packets,
2502 "RX Greater Than 1548 Bytes Packets");
2504 /* Appears to be a stray copy of struct mlx4_en_pkt_stats (normally
2504  * declared in the driver's statistics header); kept here as a comment
2504  * for reference only:
2505  *	unsigned long tx_packets;
2506  *	unsigned long tx_bytes;
2507  *	unsigned long tx_multicast_packets;
2508  *	unsigned long tx_broadcast_packets;
2509  *	unsigned long tx_errors;
2510  *	unsigned long tx_dropped;
2511  *	unsigned long tx_lt_64_bytes_packets;
2512  *	unsigned long tx_127_bytes_packets;
2513  *	unsigned long tx_255_bytes_packets;
2514  *	unsigned long tx_511_bytes_packets;
2515  *	unsigned long tx_1023_bytes_packets;
2516  *	unsigned long tx_1518_bytes_packets;
2517  *	unsigned long tx_1522_bytes_packets;
2518  *	unsigned long tx_1548_bytes_packets;
2519  *	unsigned long tx_gt_1548_bytes_packets;
2520  *	unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
2521  *	unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
2522  * #define NUM_PKT_STATS 72
2522  */
2526 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
2527 &priv->pkstats.tx_packets, "TX packets");
2528 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
2529 &priv->pkstats.tx_packets, "TX Bytes");
2530 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
2531 &priv->pkstats.tx_multicast_packets, "TX Multicast Packets");
2532 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
2533 &priv->pkstats.tx_broadcast_packets, "TX Broadcast Packets");
2534 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
2535 &priv->pkstats.tx_errors, "TX Errors");
2536 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
2537 &priv->pkstats.tx_dropped, "TX Dropped");
2538 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
2539 &priv->pkstats.tx_lt_64_bytes_packets, "TX Less Then 64 Bytes Packets");
2540 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
2541 &priv->pkstats.tx_127_bytes_packets, "TX 127 Bytes Packets");
2542 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
2543 &priv->pkstats.tx_255_bytes_packets, "TX 255 Bytes Packets");
2544 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
2545 &priv->pkstats.tx_511_bytes_packets, "TX 511 Bytes Packets");
2546 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
2547 &priv->pkstats.tx_1023_bytes_packets, "TX 1023 Bytes Packets");
2548 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
2549 &priv->pkstats.tx_1518_bytes_packets, "TX 1518 Bytes Packets");
2550 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
2551 &priv->pkstats.tx_1522_bytes_packets, "TX 1522 Bytes Packets");
2552 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
2553 &priv->pkstats.tx_1548_bytes_packets, "TX 1548 Bytes Packets");
2554 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
2555 &priv->pkstats.tx_gt_1548_bytes_packets,
2556 "TX Greater Than 1548 Bytes Packets");
2560 for (i = 0; i < priv->tx_ring_num; i++) {
2561 tx_ring = priv->tx_ring[i];
2562 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
2563 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
2564 CTLFLAG_RD, NULL, "TX Ring");
2565 ring_list = SYSCTL_CHILDREN(ring_node);
2566 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
2567 CTLFLAG_RD, &tx_ring->packets, "TX packets");
2568 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
2569 CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
2572 for (i = 0; i < priv->rx_ring_num; i++) {
2573 rx_ring = priv->rx_ring[i];
2574 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
2575 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
2576 CTLFLAG_RD, NULL, "RX Ring");
2577 ring_list = SYSCTL_CHILDREN(ring_node);
2578 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
2579 CTLFLAG_RD, &rx_ring->packets, "RX packets");
2580 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
2581 CTLFLAG_RD, &rx_ring->bytes, "RX bytes");
2582 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "error",
2583 CTLFLAG_RD, &rx_ring->errors, "RX soft errors");