/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/list.h>
#include <linux/if_ether.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include <sys/sockio.h>
#include <sys/sysctl.h>
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
static int mlx4_en_unit;

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
    struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
    struct net_device *dev = cq->dev;
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
    int done;

    if (!priv->port_up)
        return LL_FLUSH_FAILED;

    if (!mlx4_en_cq_lock_poll(cq))
        return LL_FLUSH_BUSY;

    done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
    if (done)
        rx_ring->cleaned += done;
    else
        rx_ring->misses++;
#endif

    mlx4_en_cq_unlock_poll(cq);

    return done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
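
/*
 * Receive-flow-steering (RFS) acceleration: each active flow gets an
 * mlx4_en_filter that mirrors a hardware flow-steering rule, so that
 * packets for the flow are delivered to the RX ring of the CPU that
 * consumes them. Filters are tracked both on priv->filters (for
 * expiry) and in the priv->filter_hash table (for lookup).
 */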
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
    struct list_head next;
    struct work_struct work;

    u8     ip_proto;

    __be32 src_ip;
    __be32 dst_ip;

    __be16 src_port;
    __be16 dst_port;

    int rxq_index;
    struct mlx4_en_priv *priv;
    u32 flow_id;    /* RFS infrastructure id */
    int id;         /* mlx4_en driver id */
    u64 reg_id;     /* Flow steering API id */
    u8 activated;   /* Used to prevent expiry before filter
                     * is attached */
    struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
    switch (ip_proto) {
    case IPPROTO_UDP:
        return MLX4_NET_TRANS_RULE_ID_UDP;
    case IPPROTO_TCP:
        return MLX4_NET_TRANS_RULE_ID_TCP;
    default:
        return -EPROTONOSUPPORT;
    }
}
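
/*
 * Deferred to the driver workqueue: builds ETH + IPv4 + TCP/UDP spec
 * lists for the filter and (re)attaches the corresponding hardware
 * steering rule, detaching the previously installed rule first if the
 * filter already had one.
 */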
static void mlx4_en_filter_work(struct work_struct *work)
{
    struct mlx4_en_filter *filter = container_of(work,
                                                 struct mlx4_en_filter,
                                                 work);
    struct mlx4_en_priv *priv = filter->priv;
    struct mlx4_spec_list spec_tcp_udp = {
        .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
        {
            .tcp_udp = {
                .dst_port = filter->dst_port,
                .dst_port_msk = (__force __be16)-1,
                .src_port = filter->src_port,
                .src_port_msk = (__force __be16)-1,
            },
        },
    };
    struct mlx4_spec_list spec_ip = {
        .id = MLX4_NET_TRANS_RULE_ID_IPV4,
        {
            .ipv4 = {
                .dst_ip = filter->dst_ip,
                .dst_ip_msk = (__force __be32)-1,
                .src_ip = filter->src_ip,
                .src_ip_msk = (__force __be32)-1,
            },
        },
    };
    struct mlx4_spec_list spec_eth = {
        .id = MLX4_NET_TRANS_RULE_ID_ETH,
    };
    struct mlx4_net_trans_rule rule = {
        .list = LIST_HEAD_INIT(rule.list),
        .queue_mode = MLX4_NET_TRANS_Q_LIFO,
        .exclusive = 1,
        .allow_loopback = 1,
        .promisc_mode = MLX4_FS_REGULAR,
        .port = priv->port,
        .priority = MLX4_DOMAIN_RFS,
    };
    int rc;
    __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

    if (spec_tcp_udp.id < 0) {
        en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
                filter->ip_proto);
        goto ignore;
    }
    list_add_tail(&spec_eth.list, &rule.list);
    list_add_tail(&spec_ip.list, &rule.list);
    list_add_tail(&spec_tcp_udp.list, &rule.list);

    rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
    memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
    memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

    filter->activated = 0;

    if (filter->reg_id) {
        rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
        if (rc && rc != -ENOENT)
            en_err(priv, "Error detaching flow. rc = %d\n", rc);
    }

    rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
    if (rc)
        en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
    mlx4_en_filter_rfs_expire(priv);

    filter->activated = 1;
}
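
/*
 * Hash the flow 4-tuple into priv->filter_hash: ports and addresses
 * are folded into a single word and mixed with hash_long(); hash
 * collisions are handled by chaining through filter->filter_chain.
 */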
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                   __be16 src_port, __be16 dst_port)
{
    unsigned long l;
    int bucket_idx;

    l = (__force unsigned long)src_port |
        ((__force unsigned long)dst_port << 2);
    l ^= (__force unsigned long)(src_ip ^ dst_ip);

    bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

    return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
                     __be32 dst_ip, u8 ip_proto, __be16 src_port,
                     __be16 dst_port, u32 flow_id)
{
    struct mlx4_en_filter *filter = NULL;

    filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
    if (!filter)
        return NULL;

    filter->priv = priv;
    filter->rxq_index = rxq_index;
    INIT_WORK(&filter->work, mlx4_en_filter_work);

    filter->src_ip = src_ip;
    filter->dst_ip = dst_ip;
    filter->ip_proto = ip_proto;
    filter->src_port = src_port;
    filter->dst_port = dst_port;

    filter->flow_id = flow_id;

    filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

    list_add_tail(&filter->next, &priv->filters);
    hlist_add_head(&filter->filter_chain,
                   filter_hash_bucket(priv, src_ip, dst_ip, src_port,
                                      dst_port));

    return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
    struct mlx4_en_priv *priv = filter->priv;
    int rc;

    list_del(&filter->next);

    rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
    if (rc && rc != -ENOENT)
        en_err(priv, "Error detaching flow. rc = %d\n", rc);

    kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
    struct hlist_node *elem;
    struct mlx4_en_filter *filter;
    struct mlx4_en_filter *ret = NULL;

    hlist_for_each_entry(filter, elem,
                         filter_hash_bucket(priv, src_ip, dst_ip,
                                            src_port, dst_port),
                         filter_chain) {
        if (filter->src_ip == src_ip &&
            filter->dst_ip == dst_ip &&
            filter->ip_proto == ip_proto &&
            filter->src_port == src_port &&
            filter->dst_port == dst_port) {
            ret = filter;
            break;
        }
    }

    return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
    struct mlx4_en_priv *priv = netdev_priv(net_dev);
    struct mlx4_en_filter *filter;
    const struct iphdr *ip;
    const __be16 *ports;
    u8 ip_proto;
    __be32 src_ip;
    __be32 dst_ip;
    __be16 src_port;
    __be16 dst_port;
    int nhoff = skb_network_offset(skb);
    int ret = 0;

    if (skb->protocol != htons(ETH_P_IP))
        return -EPROTONOSUPPORT;

    ip = (const struct iphdr *)(skb->data + nhoff);
    if (ip_is_fragment(ip))
        return -EPROTONOSUPPORT;

    if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
        return -EPROTONOSUPPORT;
    ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

    ip_proto = ip->protocol;
    src_ip = ip->saddr;
    dst_ip = ip->daddr;
    src_port = ports[0];
    dst_port = ports[1];

    spin_lock_bh(&priv->filters_lock);
    filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
                                 src_port, dst_port);
    if (filter) {
        if (filter->rxq_index == rxq_index)
            goto out;

        filter->rxq_index = rxq_index;
    } else {
        filter = mlx4_en_filter_alloc(priv, rxq_index,
                                      src_ip, dst_ip, ip_proto,
                                      src_port, dst_port, flow_id);
        if (!filter) {
            ret = -ENOMEM;
            goto err;
        }
    }

    queue_work(priv->mdev->workqueue, &filter->work);

out:
    ret = filter->id;
err:
    spin_unlock_bh(&priv->filters_lock);
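
/*
 * Called on RX ring teardown: move every installed filter onto a
 * private list under the lock, then free the filters (and their
 * hardware steering rules) only after any pending filter work has
 * completed, so mlx4_en_filter_work() cannot race with the free.
 */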
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring *rx_ring)
{
    struct mlx4_en_filter *filter, *tmp;
    LIST_HEAD(del_list);

    spin_lock_bh(&priv->filters_lock);
    list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
        list_move(&filter->next, &del_list);
        hlist_del(&filter->filter_chain);
    }
    spin_unlock_bh(&priv->filters_lock);

    list_for_each_entry_safe(filter, tmp, &del_list, next) {
        cancel_work_sync(&filter->work);
        mlx4_en_filter_free(filter);
    }
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
    struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
    LIST_HEAD(del_list);
    int i = 0;

    spin_lock_bh(&priv->filters_lock);
    list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
        if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
            break;

        if (filter->activated &&
            !work_pending(&filter->work) &&
            rps_may_expire_flow(priv->dev,
                                filter->rxq_index, filter->flow_id,
                                filter->id)) {
            list_move(&filter->next, &del_list);
            hlist_del(&filter->filter_chain);
        } else
            last_filter = filter;

        i++;
    }

    if (last_filter && (&last_filter->next != priv->filters.next))
        list_move(&priv->filters, &last_filter->next);

    spin_unlock_bh(&priv->filters_lock);

    list_for_each_entry_safe(filter, tmp, &del_list, next)
        mlx4_en_filter_free(filter);
}
#endif /* CONFIG_RFS_ACCEL */
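
/*
 * VLAN add/kill callbacks. These are registered further down (in
 * mlx4_en_init_netdev()) via EVENTHANDLER_REGISTER for the FreeBSD
 * vlan_config/vlan_unconfig events, hence the opaque "arg" first
 * parameter instead of a Linux-style ndo method.
 */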
static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int err;
    int idx;

    if (arg != priv)
        return;

    en_dbg(HW, priv, "adding VLAN:%d\n", vid);

    set_bit(vid, priv->active_vlans);

    /* Add VID to port VLAN filter */
    mutex_lock(&mdev->state_lock);
    if (mdev->device_up && priv->port_up) {
        err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
        if (err)
            en_err(priv, "Failed configuring VLAN filter\n");
    }
    if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
        en_dbg(HW, priv, "failed adding vlan %d\n", vid);
    mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int err;

    if (arg != priv)
        return;

    en_dbg(HW, priv, "Killing VID:%d\n", vid);

    clear_bit(vid, priv->active_vlans);

    /* Remove VID from port VLAN filter */
    mutex_lock(&mdev->state_lock);
    mlx4_unregister_vlan(mdev->dev, priv->port, vid);

    if (mdev->device_up && priv->port_up) {
        err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
        if (err)
            en_err(priv, "Failed configuring VLAN filter\n");
    }
    mutex_unlock(&mdev->state_lock);
}
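
/*
 * Attach a unicast steering rule for "mac" to the given QP, using
 * whichever steering mode the device reports: B0 steering uses a
 * multicast-style GID holding the MAC, while device-managed steering
 * installs an explicit flow-steering rule.
 */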
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
                                unsigned char *mac, int *qpn, u64 *reg_id)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_dev *dev = mdev->dev;
    int err;

    switch (dev->caps.steering_mode) {
    case MLX4_STEERING_MODE_B0: {
        struct mlx4_qp qp;
        u8 gid[16] = {0};

        qp.qpn = *qpn;
        memcpy(&gid[10], mac, ETH_ALEN);
        gid[5] = priv->port;

        err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
        break;
    }
    case MLX4_STEERING_MODE_DEVICE_MANAGED: {
        struct mlx4_spec_list spec_eth = { {NULL} };
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

        struct mlx4_net_trans_rule rule = {
            .queue_mode = MLX4_NET_TRANS_Q_FIFO,
            .exclusive = 0,
            .allow_loopback = 1,
            .promisc_mode = MLX4_FS_REGULAR,
            .priority = MLX4_DOMAIN_NIC,
        };

        rule.port = priv->port;
        rule.qpn = *qpn;
        INIT_LIST_HEAD(&rule.list);

        spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
        memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
        memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
        list_add_tail(&spec_eth.list, &rule.list);

        err = mlx4_flow_attach(dev, &rule, reg_id);
        break;
    }
    default:
        return -EINVAL;
    }
    if (err)
        en_warn(priv, "Failed Attaching Unicast\n");

    return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
                                     unsigned char *mac, int qpn, u64 reg_id)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_dev *dev = mdev->dev;

    switch (dev->caps.steering_mode) {
    case MLX4_STEERING_MODE_B0: {
        struct mlx4_qp qp;
        u8 gid[16] = {0};

        qp.qpn = qpn;
        memcpy(&gid[10], mac, ETH_ALEN);
        gid[5] = priv->port;

        mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
        break;
    }
    case MLX4_STEERING_MODE_DEVICE_MANAGED: {
        mlx4_flow_detach(dev, reg_id);
        break;
    }
    default:
        en_err(priv, "Invalid steering mode.\n");
    }
}
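
/*
 * Register the interface MAC with the port and obtain the base QP for
 * it. In A0 steering mode the QP number is derived directly from the
 * MAC table index; otherwise a QP range is reserved and a unicast
 * steering rule is attached explicitly, with the MAC tracked in
 * priv->mac_hash for later release.
 */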
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_dev *dev = mdev->dev;
    struct mlx4_mac_entry *entry;
    int index = 0;
    int err = 0;
    u64 reg_id;
    int *qpn = &priv->base_qpn;
    u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));

    en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
           IF_LLADDR(priv->dev));
    index = mlx4_register_mac(dev, priv->port, mac);
    if (index < 0) {
        err = index;
        en_err(priv, "Failed adding MAC: %pM\n",
               IF_LLADDR(priv->dev));
        return err;
    }

    if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
        int base_qpn = mlx4_get_base_qpn(dev, priv->port);
        *qpn = base_qpn + index;
        return 0;
    }

    err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0);
    en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
    if (err) {
        en_err(priv, "Failed to reserve qp for mac registration\n");
        goto qp_err;
    }

    err = mlx4_en_uc_steer_add(priv, IF_LLADDR(priv->dev), qpn, &reg_id);
    if (err)
        goto steer_err;

    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        err = -ENOMEM;
        goto alloc_err;
    }
    memcpy(entry->mac, IF_LLADDR(priv->dev), sizeof(entry->mac));
    entry->reg_id = reg_id;

    hlist_add_head(&entry->hlist,
                   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

    return 0;

alloc_err:
    mlx4_en_uc_steer_release(priv, IF_LLADDR(priv->dev), *qpn, reg_id);

steer_err:
    mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
    mlx4_unregister_mac(dev, priv->port, mac);
    return err;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_dev *dev = mdev->dev;
    int qpn = priv->base_qpn;
    u64 mac;

    if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
        mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
        en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
               IF_LLADDR(priv->dev));
        mlx4_unregister_mac(dev, priv->port, mac);
    } else {
        struct mlx4_mac_entry *entry;
        struct hlist_node *n, *tmp;
        struct hlist_head *bucket;
        unsigned int i;

        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
            bucket = &priv->mac_hash[i];
            hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
                mac = mlx4_mac_to_u64(entry->mac);
                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                       entry->mac);
                mlx4_en_uc_steer_release(priv, entry->mac,
                                         qpn, entry->reg_id);

                mlx4_unregister_mac(dev, priv->port, mac);
                hlist_del(&entry->hlist);
                kfree(entry);
            }
        }

        en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
               priv->port, qpn);
        mlx4_qp_release_range(dev, qpn, 1);
        priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
    }
}

static void mlx4_en_clear_list(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_mc_list *tmp, *mc_to_del;

    list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
        list_del(&mc_to_del->list);
        kfree(mc_to_del);
    }
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
    struct ifmultiaddr *ifma;
    struct mlx4_en_mc_list *tmp;
    struct mlx4_en_priv *priv = netdev_priv(dev);

    TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
            ETHER_ADDR_LEN)
            continue;
        /* Make sure the list didn't grow. */
        tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
        if (tmp == NULL)
            break;
        memcpy(tmp->addr,
               LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
        list_add_tail(&tmp->list, &priv->mc_list);
    }
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
                                struct list_head *dst,
                                struct list_head *src)
{
    struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
    bool found;

    /* Find all the entries that should be removed from dst,
     * These are the entries that are not found in src
     */
    list_for_each_entry(dst_tmp, dst, list) {
        found = false;
        list_for_each_entry(src_tmp, src, list) {
            if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
                found = true;
                break;
            }
        }
        if (!found)
            dst_tmp->action = MCLIST_REM;
    }

    /* Add entries that exist in src but not in dst
     * mark them as need to add
     */
    list_for_each_entry(src_tmp, src, list) {
        found = false;
        list_for_each_entry(dst_tmp, dst, list) {
            if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
                dst_tmp->action = MCLIST_NONE;
                found = true;
                break;
            }
        }
        if (!found) {
            new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
                             GFP_KERNEL);
            if (!new_mc) {
                en_err(priv, "Failed to allocate current multicast list\n");
                return;
            }
            memcpy(new_mc, src_tmp,
                   sizeof(struct mlx4_en_mc_list));
            new_mc->action = MCLIST_ADD;
            list_add_tail(&new_mc->list, dst);
        }
    }
}
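
/*
 * Entered from ioctl context; the actual RX-mode reprogramming issues
 * firmware commands that may sleep, so the work is deferred to the
 * rx_mode_task on the driver workqueue rather than done inline.
 */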
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);

    if (!priv->port_up)
        return;

    queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                     struct mlx4_en_dev *mdev)
{
    int err = 0;

    if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
        priv->flags |= MLX4_EN_FLAG_PROMISC;

        /* Enable promiscuous mode */
        switch (mdev->dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
            err = mlx4_flow_steer_promisc_add(mdev->dev,
                                              priv->port,
                                              priv->base_qpn,
                                              MLX4_FS_ALL_DEFAULT);
            if (err)
                en_err(priv, "Failed enabling promiscuous mode\n");
            priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
            break;

        case MLX4_STEERING_MODE_B0:
            err = mlx4_unicast_promisc_add(mdev->dev,
                                           priv->base_qpn,
                                           priv->port);
            if (err)
                en_err(priv, "Failed enabling unicast promiscuous mode\n");

            /* Add the default qp number as multicast
             * promisc
             */
            if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                err = mlx4_multicast_promisc_add(mdev->dev,
                                                 priv->base_qpn,
                                                 priv->port);
                if (err)
                    en_err(priv, "Failed enabling multicast promiscuous mode\n");
                priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
            }
            break;

        case MLX4_STEERING_MODE_A0:
            err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                         priv->port,
                                         priv->base_qpn,
                                         1);
            if (err)
                en_err(priv, "Failed enabling promiscuous mode\n");
            break;
        }

        /* Disable port multicast filter (unconditionally) */
        err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                  0, MLX4_MCAST_DISABLE);
        if (err)
            en_err(priv, "Failed disabling multicast filter\n");
    }
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
                                       struct mlx4_en_dev *mdev)
{
    int err = 0;

    priv->flags &= ~MLX4_EN_FLAG_PROMISC;

    /* Disable promiscuous mode */
    switch (mdev->dev->caps.steering_mode) {
    case MLX4_STEERING_MODE_DEVICE_MANAGED:
        err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                             priv->port,
                                             MLX4_FS_ALL_DEFAULT);
        if (err)
            en_err(priv, "Failed disabling promiscuous mode\n");
        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
        break;

    case MLX4_STEERING_MODE_B0:
        err = mlx4_unicast_promisc_remove(mdev->dev,
                                          priv->base_qpn,
                                          priv->port);
        if (err)
            en_err(priv, "Failed disabling unicast promiscuous mode\n");
        /* Disable Multicast promisc */
        if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
            err = mlx4_multicast_promisc_remove(mdev->dev,
                                                priv->base_qpn,
                                                priv->port);
            if (err)
                en_err(priv, "Failed disabling multicast promiscuous mode\n");
            priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
        }
        break;

    case MLX4_STEERING_MODE_A0:
        err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                     priv->port,
                                     priv->base_qpn,
                                     0);
        if (err)
            en_err(priv, "Failed disabling promiscuous mode\n");
        break;
    }
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
                                 struct net_device *dev,
                                 struct mlx4_en_dev *mdev)
{
    struct mlx4_en_mc_list *mclist, *tmp;
    u8 mc_list[16] = {0};
    int err = 0;
    u64 mcast_addr = 0;

    /* Enable/disable the multicast filter according to IFF_ALLMULTI */
    if (dev->if_flags & IFF_ALLMULTI) {
        err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                  0, MLX4_MCAST_DISABLE);
        if (err)
            en_err(priv, "Failed disabling multicast filter\n");

        /* Add the default qp number as multicast promisc */
        if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
            switch (mdev->dev->caps.steering_mode) {
            case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                  priv->port,
                                                  priv->base_qpn,
                                                  MLX4_FS_MC_DEFAULT);
                break;

            case MLX4_STEERING_MODE_B0:
                err = mlx4_multicast_promisc_add(mdev->dev,
                                                 priv->base_qpn,
                                                 priv->port);
                break;

            case MLX4_STEERING_MODE_A0:
                break;
            }
            if (err)
                en_err(priv, "Failed entering multicast promisc mode\n");
            priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
        }
    } else {
        /* Disable Multicast promisc */
        if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
            switch (mdev->dev->caps.steering_mode) {
            case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                     priv->port,
                                                     MLX4_FS_MC_DEFAULT);
                break;

            case MLX4_STEERING_MODE_B0:
                err = mlx4_multicast_promisc_remove(mdev->dev,
                                                    priv->base_qpn,
                                                    priv->port);
                break;

            case MLX4_STEERING_MODE_A0:
                break;
            }
            if (err)
                en_err(priv, "Failed disabling multicast promiscuous mode\n");
            priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
        }

        err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                  0, MLX4_MCAST_DISABLE);
        if (err)
            en_err(priv, "Failed disabling multicast filter\n");

        /* Flush mcast filter and init it with broadcast address */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
                            1, MLX4_MCAST_CONFIG);

        /* Update multicast list - we cache all addresses so they won't
         * change while HW is updated holding the command semaphore */
        mlx4_en_cache_mclist(dev);
        list_for_each_entry(mclist, &priv->mc_list, list) {
            mcast_addr = mlx4_mac_to_u64(mclist->addr);
            mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                mcast_addr, 0, MLX4_MCAST_CONFIG);
        }
        err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                  0, MLX4_MCAST_ENABLE);
        if (err)
            en_err(priv, "Failed enabling multicast filter\n");

        update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
        list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
            if (mclist->action == MCLIST_REM) {
                /* detach this address and delete from list */
                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                mc_list[5] = priv->port;
                err = mlx4_multicast_detach(mdev->dev,
                                            &priv->rss_map.indir_qp,
                                            mc_list,
                                            MLX4_PROT_ETH,
                                            mclist->reg_id);
                if (err)
                    en_err(priv, "Failed to detach multicast address\n");

                /* remove from list */
                list_del(&mclist->list);
                kfree(mclist);
            } else if (mclist->action == MCLIST_ADD) {
                /* attach the address */
                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                /* needed for B0 steering support */
                mc_list[5] = priv->port;
                err = mlx4_multicast_attach(mdev->dev,
                                            &priv->rss_map.indir_qp,
                                            mc_list,
                                            priv->port, 0,
                                            MLX4_PROT_ETH,
                                            &mclist->reg_id);
                if (err)
                    en_err(priv, "Failed to attach multicast address\n");
            }
        }
    }
}
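
/*
 * Workqueue handler behind mlx4_en_set_rx_mode(): re-checks device
 * and port state, refreshes the reported link state, then applies the
 * promiscuous/multicast configuration under mdev->state_lock.
 */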
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
    struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                             rx_mode_task);
    struct mlx4_en_dev *mdev = priv->mdev;
    struct net_device *dev = priv->dev;

    mutex_lock(&mdev->state_lock);
    if (!mdev->device_up) {
        en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
        goto out;
    }
    if (!priv->port_up) {
        en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
        goto out;
    }

    if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
        if (priv->port_state.link_state) {
            priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
            /* Important note: the following call for if_link_state_change
             * is needed for interface up scenario (start port, link state
             * change) */
            /* update netif baudrate */
            priv->dev->if_baudrate =
                IF_Mbps(priv->port_state.link_speed);
            if_link_state_change(priv->dev, LINK_STATE_UP);
            en_dbg(HW, priv, "Link Up\n");
        }
    }

    /* Promiscuous mode: disable all filters */
    if ((dev->if_flags & IFF_PROMISC) ||
        (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
        mlx4_en_set_promisc_mode(priv, mdev);
        goto out;
    }

    /* Not in promiscuous mode */
    if (priv->flags & MLX4_EN_FLAG_PROMISC)
        mlx4_en_clear_promisc_mode(priv, mdev);

    mlx4_en_do_multicast(priv, dev, mdev);
out:
    mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_cq *cq;
    unsigned long flags;
    int i;

    for (i = 0; i < priv->rx_ring_num; i++) {
        cq = priv->rx_cq[i];
        spin_lock_irqsave(&cq->lock, flags);
        napi_synchronize(&cq->napi);
        mlx4_en_process_rx_cq(dev, cq, 0);
        spin_unlock_irqrestore(&cq->lock, flags);
    }
}
#endif

static void mlx4_en_watchdog_timeout(void *arg)
{
    struct mlx4_en_priv *priv = arg;
    struct mlx4_en_dev *mdev = priv->mdev;

    en_dbg(DRV, priv, "Scheduling watchdog\n");
    queue_work(mdev->workqueue, &priv->watchdog_task);
    if (priv->port_up)
        callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
            mlx4_en_watchdog_timeout, priv);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
    struct mlx4_en_cq *cq;
    int i;

    /* If we haven't received a specific coalescing setting
     * (module param), we set the moderation parameters as follows:
     * - moder_cnt is set to the number of mtu sized packets to
     *   satisfy our coalescing target.
     * - moder_time is set to a fixed value.
     */
    priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->if_mtu + 1;
    priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
    priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
    priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
    en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
           "rx_frames:%d rx_usecs:%d\n",
           (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);

    /* Setup cq moderation params */
    for (i = 0; i < priv->rx_ring_num; i++) {
        cq = priv->rx_cq[i];
        cq->moder_cnt = priv->rx_frames;
        cq->moder_time = priv->rx_usecs;
        priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
        priv->last_moder_packets[i] = 0;
        priv->last_moder_bytes[i] = 0;
    }

    for (i = 0; i < priv->tx_ring_num; i++) {
        cq = priv->tx_cq[i];
        cq->moder_cnt = priv->tx_frames;
        cq->moder_time = priv->tx_usecs;
    }

    /* Reset auto-moderation params */
    priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
    priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
    priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
    priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
    priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
    priv->adaptive_rx_coal = 1;
    priv->last_moder_jiffies = 0;
    priv->last_moder_tx_packets = 0;
}
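
/*
 * Adaptive RX coalescing, sampled roughly every sample_interval
 * seconds per ring. Below pkt_rate_low the low delay is used, above
 * pkt_rate_high the high delay; in between the moderation time is
 * interpolated linearly:
 *
 *   moder_time = rx_usecs_low + (rate - pkt_rate_low) *
 *       (rx_usecs_high - rx_usecs_low) / (pkt_rate_high - pkt_rate_low)
 */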
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
    unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
    struct mlx4_en_cq *cq;
    unsigned long packets;
    unsigned long rate;
    unsigned long avg_pkt_size;
    unsigned long rx_packets;
    unsigned long rx_bytes;
    unsigned long rx_pkt_diff;
    int moder_time;
    int ring, err;

    if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
        return;

    for (ring = 0; ring < priv->rx_ring_num; ring++) {
        spin_lock(&priv->stats_lock);
        rx_packets = priv->rx_ring[ring]->packets;
        rx_bytes = priv->rx_ring[ring]->bytes;
        spin_unlock(&priv->stats_lock);

        rx_pkt_diff = ((unsigned long) (rx_packets -
                       priv->last_moder_packets[ring]));
        packets = rx_pkt_diff;
        rate = packets * HZ / period;
        avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
                       priv->last_moder_bytes[ring])) / packets : 0;

        /* Apply auto-moderation only when the packet rate
         * exceeds a level at which it matters */
        if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
            avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
            if (rate < priv->pkt_rate_low)
                moder_time = priv->rx_usecs_low;
            else if (rate > priv->pkt_rate_high)
                moder_time = priv->rx_usecs_high;
            else
                moder_time = (rate - priv->pkt_rate_low) *
                    (priv->rx_usecs_high - priv->rx_usecs_low) /
                    (priv->pkt_rate_high - priv->pkt_rate_low) +
                    priv->rx_usecs_low;
        } else {
            moder_time = priv->rx_usecs_low;
        }

        if (moder_time != priv->last_moder_time[ring]) {
            priv->last_moder_time[ring] = moder_time;
            cq = priv->rx_cq[ring];
            cq->moder_time = moder_time;
            err = mlx4_en_set_cq_moder(priv, cq);
            if (err)
                en_err(priv, "Failed modifying moderation for cq:%d\n",
                       ring);
        }
        priv->last_moder_packets[ring] = rx_packets;
        priv->last_moder_bytes[ring] = rx_bytes;
    }

    priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
    struct delayed_work *delay = to_delayed_work(work);
    struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                             stats_task);
    struct mlx4_en_dev *mdev = priv->mdev;
    int err;

    mutex_lock(&mdev->state_lock);
    if (mdev->device_up) {
        if (priv->port_up) {
            err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
            if (err)
                en_dbg(HW, priv, "Could not update stats\n");

            mlx4_en_auto_moderation(priv);
        }

        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
    }
    mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
    struct delayed_work *delay = to_delayed_work(work);
    struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                             service_task);
    struct mlx4_en_dev *mdev = priv->mdev;

    mutex_lock(&mdev->state_lock);
    if (mdev->device_up) {
        queue_delayed_work(mdev->workqueue, &priv->service_task,
                           SERVICE_TASK_DELAY);
    }
    mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
    struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                             linkstate_task);
    struct mlx4_en_dev *mdev = priv->mdev;
    int linkstate = priv->link_state;

    mutex_lock(&mdev->state_lock);
    /* If observable port state changed set carrier state and
     * report to system log */
    if (priv->last_link_state != linkstate) {
        if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
            en_info(priv, "Link Down\n");
            if_link_state_change(priv->dev, LINK_STATE_DOWN);
            /* update netif baudrate */
            priv->dev->if_baudrate = 0;

        /* make sure the port is up before notifying the OS.
         * This is tricky since we get here on INIT_PORT and
         * in such case we can't tell the OS the port is up.
         * To solve this there is a call to if_link_state_change
         * in set_rx_mode.
         */
        } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){
            if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
                en_info(priv, "Query port failed\n");
            priv->dev->if_baudrate =
                IF_Mbps(priv->port_state.link_speed);
            en_info(priv, "Link Up\n");
            if_link_state_change(priv->dev, LINK_STATE_UP);
        }
    }
    priv->last_link_state = linkstate;
    mutex_unlock(&mdev->state_lock);
}
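
/*
 * Port bring-up order: RX CQs and rings first, then the ethernet QP
 * and RSS steering, the drop QP, TX CQs/rings, port configuration
 * (MTU/pause, default QP number), INIT_PORT, and finally the
 * broadcast attach plus the deferred rx-mode work.
 */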
int mlx4_en_start_port(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_cq *cq;
    struct mlx4_en_tx_ring *tx_ring;
    int rx_index = 0;
    int tx_index = 0;
    int err = 0;
    int i;
    int j;
    u8 mc_list[16] = {0};

    if (priv->port_up) {
        en_dbg(DRV, priv, "start port called while port already up\n");
        return 0;
    }

    INIT_LIST_HEAD(&priv->mc_list);
    INIT_LIST_HEAD(&priv->curr_list);
    INIT_LIST_HEAD(&priv->ethtool_list);

    /* Calculate Rx buf size */
    dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
    mlx4_en_calc_rx_buf(dev);
    priv->rx_alloc_size = max_t(int, 2 * roundup_pow_of_two(priv->rx_mb_size),
        PAGE_SIZE);
    priv->rx_alloc_order = get_order(priv->rx_alloc_size);
    priv->rx_buf_size = roundup_pow_of_two(priv->rx_mb_size);
    priv->log_rx_info = ROUNDUP_LOG2(sizeof(struct mlx4_en_rx_buf));
    en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);

    /* Configure rx cq's and rings */
    err = mlx4_en_activate_rx_rings(priv);
    if (err) {
        en_err(priv, "Failed to activate RX rings\n");
        return err;
    }
    for (i = 0; i < priv->rx_ring_num; i++) {
        cq = priv->rx_cq[i];

        mlx4_en_cq_init_lock(cq);
        err = mlx4_en_activate_cq(priv, cq, i);
        if (err) {
            en_err(priv, "Failed activating Rx CQ\n");
            goto cq_err;
        }
        for (j = 0; j < cq->size; j++)
            cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
        err = mlx4_en_set_cq_moder(priv, cq);
        if (err) {
            en_err(priv, "Failed setting cq moderation parameters");
            mlx4_en_deactivate_cq(priv, cq);
            goto cq_err;
        }
        mlx4_en_arm_cq(priv, cq);
        priv->rx_ring[i]->cqn = cq->mcq.cqn;
        ++rx_index;
    }

    en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
    err = mlx4_en_get_qp(priv);
    if (err) {
        en_err(priv, "Failed getting eth qp\n");
        goto cq_err;
    }
    mdev->mac_removed[priv->port] = 0;

    /* gets default allocated counter index from func cap */
    /* or sink counter index if no resources */
    priv->counter_index = mdev->dev->caps.def_counter_index[priv->port - 1];

    en_dbg(DRV, priv, "%s: default counter index %d for port %d\n",
           __func__, priv->counter_index, priv->port);

    err = mlx4_en_config_rss_steer(priv);
    if (err) {
        en_err(priv, "Failed configuring rss steering\n");
        goto mac_err;
    }

    err = mlx4_en_create_drop_qp(priv);
    if (err)
        goto rss_err;

    /* Configure tx cq's and rings */
    for (i = 0; i < priv->tx_ring_num; i++) {
        /* Configure cq */
        cq = priv->tx_cq[i];
        err = mlx4_en_activate_cq(priv, cq, i);
        if (err) {
            en_err(priv, "Failed activating Tx CQ\n");
            goto tx_err;
        }
        err = mlx4_en_set_cq_moder(priv, cq);
        if (err) {
            en_err(priv, "Failed setting cq moderation parameters");
            mlx4_en_deactivate_cq(priv, cq);
            goto tx_err;
        }
        en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
        cq->buf->wqe_index = cpu_to_be16(0xffff);

        /* Configure ring */
        tx_ring = priv->tx_ring[i];

        err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
                                       i / priv->num_tx_rings_p_up);
        if (err) {
            en_err(priv, "Failed activating Tx ring %d\n", i);
            mlx4_en_deactivate_cq(priv, cq);
            goto tx_err;
        }

        /* Arm CQ for TX completions */
        mlx4_en_arm_cq(priv, cq);

        /* Set initial ownership of all Tx TXBBs to SW (1) */
        for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
            *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
        ++tx_index;
    }

    /* Configure port */
    err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                priv->rx_mb_size,
                                priv->prof->tx_pause,
                                priv->prof->tx_ppp,
                                priv->prof->rx_pause,
                                priv->prof->rx_ppp);
    if (err) {
        en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
               priv->port, err);
        goto tx_err;
    }
    /* Set default qp number */
    err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
    if (err) {
        en_err(priv, "Failed setting default qp numbers\n");
        goto tx_err;
    }

    /* Init port */
    en_dbg(HW, priv, "Initializing port\n");
    err = mlx4_INIT_PORT(mdev->dev, priv->port);
    if (err) {
        en_err(priv, "Failed Initializing port\n");
        goto tx_err;
    }

    /* Attach rx QP to broadcast address */
    memset(&mc_list[10], 0xff, ETH_ALEN);
    mc_list[5] = priv->port; /* needed for B0 steering support */
    if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                              priv->port, 0, MLX4_PROT_ETH,
                              &priv->broadcast_id))
        mlx4_warn(mdev, "Failed Attaching Broadcast\n");

    /* Must redo promiscuous mode setup. */
    priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

    /* Schedule multicast task to populate multicast list */
    queue_work(mdev->workqueue, &priv->rx_mode_task);

    mlx4_set_stats_bitmap(mdev->dev, priv->stats_bitmap);

    priv->port_up = true;

    /* Enable the queues. */
    dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
    dev->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef CONFIG_DEBUG_FS
    mlx4_en_create_debug_files(priv);
#endif
    callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
        mlx4_en_watchdog_timeout, priv);

    return 0;

tx_err:
    while (tx_index--) {
        mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
        mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
    }
    mlx4_en_destroy_drop_qp(priv);
rss_err:
    mlx4_en_release_rss_steer(priv);
mac_err:
    mlx4_en_put_qp(priv);
cq_err:
    while (rx_index--)
        mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
    for (i = 0; i < priv->rx_ring_num; i++)
        mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

    return err; /* need to close devices */
}
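
/*
 * Tear-down runs in roughly the reverse order of mlx4_en_start_port():
 * close the port, drop promiscuous state and multicast attachments,
 * then release the drop QP, TX and RX rings/CQs, RSS steering and the
 * unicast QP/MAC registration.
 */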
void mlx4_en_stop_port(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_mc_list *mclist, *tmp;
    int i;
    u8 mc_list[16] = {0};

    if (!priv->port_up) {
        en_dbg(DRV, priv, "stop port called while port already down\n");
        return;
    }

#ifdef CONFIG_DEBUG_FS
    mlx4_en_delete_debug_files(priv);
#endif

    /* close port */
    mlx4_CLOSE_PORT(mdev->dev, priv->port);

    /* Set port as not active */
    priv->port_up = false;
    if (priv->counter_index != 0xff) {
        mlx4_counter_free(mdev->dev, priv->port, priv->counter_index);
        priv->counter_index = 0xff;
    }

    /* Promiscuous mode */
    if (mdev->dev->caps.steering_mode ==
        MLX4_STEERING_MODE_DEVICE_MANAGED) {
        priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
                         MLX4_EN_FLAG_MC_PROMISC);
        mlx4_flow_steer_promisc_remove(mdev->dev,
                                       priv->port,
                                       MLX4_FS_ALL_DEFAULT);
        mlx4_flow_steer_promisc_remove(mdev->dev,
                                       priv->port,
                                       MLX4_FS_MC_DEFAULT);
    } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
        priv->flags &= ~MLX4_EN_FLAG_PROMISC;

        /* Disable promiscuous mode */
        mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
                                    priv->port);

        /* Disable Multicast promisc */
        if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
            mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
                                          priv->port);
            priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
        }
    }

    /* Detach All multicasts */
    memset(&mc_list[10], 0xff, ETH_ALEN);
    mc_list[5] = priv->port; /* needed for B0 steering support */
    mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                          MLX4_PROT_ETH, priv->broadcast_id);
    list_for_each_entry(mclist, &priv->curr_list, list) {
        memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
        mc_list[5] = priv->port;
        mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
                              mc_list, MLX4_PROT_ETH, mclist->reg_id);
    }
    mlx4_en_clear_list(dev);
    list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
        list_del(&mclist->list);
        kfree(mclist);
    }

    /* Flush multicast filter */
    mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
    mlx4_en_destroy_drop_qp(priv);

    /* Free TX Rings */
    for (i = 0; i < priv->tx_ring_num; i++) {
        mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
        mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
    }

    for (i = 0; i < priv->tx_ring_num; i++)
        mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

    /* Free RSS qps */
    mlx4_en_release_rss_steer(priv);

    /* Unregister Mac address for the port */
    mlx4_en_put_qp(priv);
    mdev->mac_removed[priv->port] = 1;

    /* Free RX Rings */
    for (i = 0; i < priv->rx_ring_num; i++) {
        struct mlx4_en_cq *cq = priv->rx_cq[i];
        mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
        mlx4_en_deactivate_cq(priv, cq);
    }

    callout_stop(&priv->watchdog_timer);

    dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
static void mlx4_en_restart(struct work_struct *work)
{
    struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                             watchdog_task);
    struct mlx4_en_dev *mdev = priv->mdev;
    struct net_device *dev = priv->dev;
    struct mlx4_en_tx_ring *ring;
    int i;

    if (priv->blocked == 0 || priv->port_up == 0)
        return;
    for (i = 0; i < priv->tx_ring_num; i++) {
        ring = priv->tx_ring[i];
        if (ring->blocked &&
            ring->watchdog_time + MLX4_EN_WATCHDOG_TIMEOUT < ticks)
            goto reset;
    }
    return;

reset:
    priv->port_stats.tx_timeout++;
    en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

    mutex_lock(&mdev->state_lock);
    if (priv->port_up) {
        mlx4_en_stop_port(dev);
        //for (i = 0; i < priv->tx_ring_num; i++)
        //    netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
        if (mlx4_en_start_port(dev))
            en_err(priv, "Failed restarting port %d\n", priv->port);
    }
    mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int i;

    if (!mlx4_is_slave(mdev->dev))
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
            en_dbg(HW, priv, "Failed dumping statistics\n");

    memset(&priv->pstats, 0, sizeof(priv->pstats));
    memset(&priv->pkstats, 0, sizeof(priv->pkstats));
    memset(&priv->port_stats, 0, sizeof(priv->port_stats));
    memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

    for (i = 0; i < priv->tx_ring_num; i++) {
        priv->tx_ring[i]->bytes = 0;
        priv->tx_ring[i]->packets = 0;
        priv->tx_ring[i]->tx_csum = 0;
    }
    for (i = 0; i < priv->rx_ring_num; i++) {
        priv->rx_ring[i]->bytes = 0;
        priv->rx_ring[i]->packets = 0;
        priv->rx_ring[i]->csum_ok = 0;
        priv->rx_ring[i]->csum_none = 0;
    }
}
static void mlx4_en_open(void *arg)
{
    struct mlx4_en_priv *priv;
    struct mlx4_en_dev *mdev;
    struct net_device *dev;
    int err = 0;

    priv = arg;
    mdev = priv->mdev;
    dev = priv->dev;

    mutex_lock(&mdev->state_lock);

    if (!mdev->device_up) {
        en_err(priv, "Cannot open - device down/disabled\n");
        goto out;
    }

    /* Reset HW statistics and SW counters */
    mlx4_en_clear_stats(dev);

    err = mlx4_en_start_port(dev);
    if (err)
        en_err(priv, "Failed starting port:%d\n", priv->port);

out:
    mutex_unlock(&mdev->state_lock);
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
    int i;

#ifdef CONFIG_RFS_ACCEL
    if (priv->dev->rx_cpu_rmap) {
        free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
        priv->dev->rx_cpu_rmap = NULL;
    }
#endif

    for (i = 0; i < priv->tx_ring_num; i++) {
        if (priv->tx_ring && priv->tx_ring[i])
            mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
        if (priv->tx_cq && priv->tx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
    }

    for (i = 0; i < priv->rx_ring_num; i++) {
        if (priv->rx_ring[i])
            mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                priv->prof->rx_ring_size, priv->stride);
        if (priv->rx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
    }

    if (priv->sysctl)
        sysctl_ctx_free(&priv->stat_ctx);
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
    struct mlx4_en_port_profile *prof = priv->prof;
    int i;
    int node = 0;

    /* Create rx Rings */
    for (i = 0; i < priv->rx_ring_num; i++) {
        if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
                              prof->rx_ring_size, i, RX, node))
            goto err;

        if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
                                   prof->rx_ring_size, node))
            goto err;
    }

    /* Create tx Rings */
    for (i = 0; i < priv->tx_ring_num; i++) {
        if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
                              prof->tx_ring_size, i, TX, node))
            goto err;

        if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
                                   prof->tx_ring_size, TXBB_SIZE, node, i))
            goto err;
    }

#ifdef CONFIG_RFS_ACCEL
    priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
    if (!priv->dev->rx_cpu_rmap)
        goto err;
#endif
    /* Re-create stat sysctls in case the number of rings changed. */
    mlx4_en_sysctl_stat(priv);
    return 0;

err:
    en_err(priv, "Failed to allocate NIC resources\n");
    for (i = 0; i < priv->rx_ring_num; i++) {
        if (priv->rx_ring[i])
            mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                                    prof->rx_ring_size,
                                    priv->stride);
        if (priv->rx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
    }
    for (i = 0; i < priv->tx_ring_num; i++) {
        if (priv->tx_ring[i])
            mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
        if (priv->tx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
    }
    priv->port_up = false;
    return -ENOMEM;
}
struct en_port_attribute {
    struct attribute attr;
    ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
    ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

#define PORT_ATTR_RO(_name) \
    struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

#define EN_PORT_ATTR(_name, _mode, _show, _store) \
    struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)
void mlx4_en_destroy_netdev(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;

    en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

    if (priv->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
    if (priv->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

    /* Unregister device - this will close the port if it was up */
    if (priv->registered)
        ether_ifdetach(dev);

    if (priv->allocated)
        mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

    mutex_lock(&mdev->state_lock);
    mlx4_en_stop_port(dev);
    mutex_unlock(&mdev->state_lock);

    cancel_delayed_work(&priv->stats_task);
    cancel_delayed_work(&priv->service_task);
    /* flush any pending task for this netdev */
    flush_workqueue(mdev->workqueue);
    callout_drain(&priv->watchdog_timer);

    /* Detach the netdev so tasks would not attempt to access it */
    mutex_lock(&mdev->state_lock);
    mdev->pndev[priv->port] = NULL;
    mutex_unlock(&mdev->state_lock);

    mlx4_en_free_resources(priv);

    /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
    if (priv->sysctl)
        sysctl_ctx_free(&priv->conf_ctx);

    kfree(priv->tx_ring);
    kfree(priv->tx_cq);

    kfree(priv);
    if_free(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int err = 0;

    en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
           (unsigned)dev->if_mtu, (unsigned)new_mtu);

    if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
        en_err(priv, "Bad MTU size:%d.\n", new_mtu);
        return -EPERM;
    }
    mutex_lock(&mdev->state_lock);
    dev->if_mtu = new_mtu;
    if (dev->if_drv_flags & IFF_DRV_RUNNING) {
        if (!mdev->device_up) {
            /* NIC is probably restarting - let watchdog task reset
             * the port */
            en_dbg(DRV, priv, "Change MTU called with card down!?\n");
        } else {
            mlx4_en_stop_port(dev);
            err = mlx4_en_start_port(dev);
            if (err) {
                en_err(priv, "Failed restarting port:%d\n",
                       priv->port);
                queue_work(mdev->workqueue, &priv->watchdog_task);
            }
        }
    }
    mutex_unlock(&mdev->state_lock);
    return 0;
}
static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
    int trans_type;
    int active;

    active = IFM_ETHER;
    if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
        return (active);
    active |= IFM_FDX;
    /*
     * [ShaharK] mlx4_en_QUERY_PORT sleeps and cannot be called under a
     * non-sleepable lock.
     * I moved it to the periodic mlx4_en_do_get_stats.
    if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
        return (active);
    */
    trans_type = priv->port_state.transciver;
    /* XXX I don't know all of the transceiver values. */
    switch (priv->port_state.link_speed) {
    case 1000:
        active |= IFM_1000_T;
        break;
    case 10000:
        if (trans_type > 0 && trans_type <= 0xC)
            active |= IFM_10G_SR;
        else if (trans_type == 0x80 || trans_type == 0)
            active |= IFM_10G_CX4;
        break;
    case 40000:
        active |= IFM_40G_CR4;
        break;
    }
    if (priv->prof->tx_pause)
        active |= IFM_ETH_TXPAUSE;
    if (priv->prof->rx_pause)
        active |= IFM_ETH_RXPAUSE;

    return (active);
}

static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
    struct mlx4_en_priv *priv;

    priv = dev->if_softc;
    ifmr->ifm_status = IFM_AVALID;
    if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
        ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active = mlx4_en_calc_media(priv);
}
static int mlx4_en_media_change(struct ifnet *dev)
{
    struct mlx4_en_priv *priv;
    struct ifmedia *ifm;
    int rxpause;
    int txpause;
    int error;

    priv = dev->if_softc;
    ifm = &priv->media;
    rxpause = txpause = 0;
    error = 0;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);
    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        if ((IFM_SUBTYPE(ifm->ifm_media)
            == IFM_SUBTYPE(mlx4_en_calc_media(priv)))
            && (ifm->ifm_media & IFM_FDX))
            break;
        printf("%s: Only auto media type\n", if_name(dev));
        return (EINVAL);
    }
    /* Allow user to set/clear pause */
    if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
        rxpause = 1;
    if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
        txpause = 1;
    if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
        priv->prof->tx_pause = txpause;
        priv->prof->rx_pause = rxpause;
        error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
            priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
            priv->prof->tx_ppp, priv->prof->rx_pause,
            priv->prof->rx_ppp);
    }
    return (error);
}
static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
{
    struct mlx4_en_priv *priv;
    struct mlx4_en_dev *mdev;
    struct ifreq *ifr;
    int error;
    int mask;

    error = 0;
    mask = 0;
    priv = dev->if_softc;
    mdev = priv->mdev;
    ifr = (struct ifreq *) data;
    switch (command) {
    case SIOCSIFMTU:
        error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
        break;
    case SIOCSIFFLAGS:
        mutex_lock(&mdev->state_lock);
        if (dev->if_flags & IFF_UP) {
            if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
                mlx4_en_start_port(dev);
            else
                mlx4_en_set_rx_mode(dev);
        } else {
            if (dev->if_drv_flags & IFF_DRV_RUNNING) {
                mlx4_en_stop_port(dev);
                if_link_state_change(dev, LINK_STATE_DOWN);
            }
        }
        mutex_unlock(&mdev->state_lock);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        mlx4_en_set_rx_mode(dev);
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(dev, ifr, &priv->media, command);
        break;
    case SIOCSIFCAP:
        mutex_lock(&mdev->state_lock);
        mask = ifr->ifr_reqcap ^ dev->if_capenable;
        if (mask & IFCAP_HWCSUM)
            dev->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            dev->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_TSO6)
            dev->if_capenable ^= IFCAP_TSO6;
        if (mask & IFCAP_LRO)
            dev->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (mask & IFCAP_VLAN_HWFILTER)
            dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
        if (mask & IFCAP_WOL_MAGIC)
            dev->if_capenable ^= IFCAP_WOL_MAGIC;
        if (dev->if_drv_flags & IFF_DRV_RUNNING)
            mlx4_en_start_port(dev);
        mutex_unlock(&mdev->state_lock);
        VLAN_CAPABILITIES(dev);
        break;
    default:
        error = ether_ioctl(dev, command, data);
        break;
    }

    return (error);
}
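
/*
 * Attach-time construction of the network interface: allocate the
 * ifnet and private state, set up work tasks and sysctls, query the
 * burned-in MAC and maximum MTU, allocate rings, register with the
 * ethernet layer, and finally configure and INIT the port.
 */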
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
{
    struct net_device *dev;
    struct mlx4_en_priv *priv;
    uint8_t dev_addr[ETHER_ADDR_LEN];
    int err;
    int i;

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    dev = priv->dev = if_alloc(IFT_ETHER);
    if (dev == NULL) {
        en_err(priv, "Net device allocation failed\n");
        kfree(priv);
        return -ENOMEM;
    }
    dev->if_softc = priv;
    if_initname(dev, "mlxen", atomic_fetchadd_int(&mlx4_en_unit, 1));
    dev->if_mtu = ETHERMTU;
    dev->if_init = mlx4_en_open;
    dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    dev->if_ioctl = mlx4_en_ioctl;
    dev->if_transmit = mlx4_en_transmit;
    dev->if_qflush = mlx4_en_qflush;
    dev->if_snd.ifq_maxlen = prof->tx_ring_size;

    /*
     * Initialize driver private data
     */
    priv->counter_index = 0xff;
    spin_lock_init(&priv->stats_lock);
    INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
    INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
    INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
    INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
    INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
    callout_init(&priv->watchdog_timer, 1);
#ifdef CONFIG_RFS_ACCEL
    INIT_LIST_HEAD(&priv->filters);
    spin_lock_init(&priv->filters_lock);
#endif

    priv->msg_enable = MLX4_EN_MSG_LEVEL;
    priv->mdev = mdev;
    priv->ddev = &mdev->pdev->dev;
    priv->prof = prof;
    priv->port = port;
    priv->port_up = false;
    priv->flags = prof->flags;
    priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                                   MLX4_WQE_CTRL_SOLICITED);

    priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
    priv->tx_ring_num = prof->tx_ring_num;
    priv->tx_ring = kcalloc(MAX_TX_RINGS,
                            sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
    if (!priv->tx_ring) {
        err = -ENOMEM;
        goto out;
    }
    priv->tx_cq = kcalloc(MAX_TX_RINGS, sizeof(struct mlx4_en_cq *),
                          GFP_KERNEL);
    if (!priv->tx_cq) {
        err = -ENOMEM;
        goto out;
    }
    priv->rx_ring_num = prof->rx_ring_num;
    priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
    priv->mac_index = -1;
    priv->last_ifq_jiffies = 0;
    priv->if_counters_rx_errors = 0;
    priv->if_counters_rx_no_buffer = 0;
#ifdef CONFIG_MLX4_EN_DCB
    if (!mlx4_is_slave(priv->mdev->dev)) {
        priv->dcbx_cap = DCB_CAP_DCBX_HOST;
        priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
        if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
            dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
        } else {
            en_info(priv, "QoS disabled - no HW support\n");
            dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
        }
    }
#endif

    for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
        INIT_HLIST_HEAD(&priv->mac_hash[i]);

    /* Query for default mac and max mtu */
    priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
    priv->mac = mdev->dev->caps.def_mac[priv->port];
    if (ILLEGAL_MAC(priv->mac)) {
#if BITS_PER_LONG == 64
        en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
               priv->port, priv->mac);
#elif BITS_PER_LONG == 32
        en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
               priv->port, priv->mac);
#endif
        err = -EINVAL;
        goto out;
    }
    priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                      DS_SIZE);

    mlx4_en_sysctl_conf(priv);

    err = mlx4_en_alloc_resources(priv);
    if (err)
        goto out;

    /* Allocate page for receive rings */
    err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
                             MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
    if (err) {
        en_err(priv, "Failed to allocate page for rx qps\n");
        goto out;
    }
    priv->allocated = 1;

    /*
     * Set driver features
     */
    dev->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
    dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
    dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
    dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
    dev->if_capabilities |= IFCAP_LRO;

    if (mdev->LSO_support)
        dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;

    /* set TSO limits so that we don't have to drop TX packets */
    dev->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
    dev->if_hw_tsomaxsegcount = 16;
    dev->if_hw_tsomaxsegsize = 65536; /* XXX can do up to 4GByte */

    dev->if_capenable = dev->if_capabilities;

    dev->if_hwassist = 0;
    if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
        dev->if_hwassist |= CSUM_TSO;
    if (dev->if_capenable & IFCAP_TXCSUM)
        dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);

    /* Register for VLAN events */
    priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
    priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

    mdev->pndev[priv->port] = dev;

    priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
    mlx4_en_set_default_moderation(priv);

    /* Set default MAC */
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));

    ether_ifattach(dev, dev_addr);
    if_link_state_change(dev, LINK_STATE_DOWN);
    ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
        mlx4_en_media_change, mlx4_en_media_status);
    ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
    ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
    ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
    ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
    ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);

    en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
    en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

    priv->registered = 1;
    priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
    err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                priv->rx_mb_size,
                                prof->tx_pause, prof->tx_ppp,
                                prof->rx_pause, prof->rx_ppp);
    if (err) {
        en_err(priv, "Failed setting port general configurations "
               "for port %d, with error %d\n", priv->port, err);
        goto out;
    }

    /* Init port */
    en_warn(priv, "Initializing port\n");
    err = mlx4_INIT_PORT(mdev->dev, priv->port);
    if (err) {
        en_err(priv, "Failed Initializing port\n");
        goto out;
    }

    queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

    if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
        queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);

    return 0;

out:
    mlx4_en_destroy_netdev(dev);
    return err;
}

static int mlx4_en_set_ring_size(struct net_device *dev,
    int rx_size, int tx_size)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int port_up = 0;
    int err = 0;

    rx_size = roundup_pow_of_two(rx_size);
    rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
    rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
    tx_size = roundup_pow_of_two(tx_size);
    tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
    tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

    if (rx_size == (priv->port_up ?
        priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
        tx_size == priv->tx_ring[0]->size)
        return 0;
    mutex_lock(&mdev->state_lock);
    if (priv->port_up) {
        port_up = 1;
        mlx4_en_stop_port(dev);
    }
    mlx4_en_free_resources(priv);
    priv->prof->tx_ring_size = tx_size;
    priv->prof->rx_ring_size = rx_size;
    err = mlx4_en_alloc_resources(priv);
    if (err) {
        en_err(priv, "Failed reallocating port resources\n");
        goto out;
    }
    if (port_up) {
        err = mlx4_en_start_port(dev);
        if (err)
            en_err(priv, "Failed starting port\n");
    }
out:
    mutex_unlock(&mdev->state_lock);
    return err;
}
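
/*
 * The sysctl handlers below follow the usual sysctl_handle_int()
 * pattern: copy the current value out, and only act when req->newptr
 * indicates a new value was supplied. Ring sizes are clamped to
 * powers of two within the supported range by mlx4_en_set_ring_size()
 * above, e.g. (interface name hypothetical):
 *
 *   # sysctl hw.mlxen0.conf.rx_size=4096
 */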
2234 static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
2236 struct mlx4_en_priv *priv;
2241 size = priv->prof->rx_ring_size;
2242 error = sysctl_handle_int(oidp, &size, 0, req);
2243 if (error || !req->newptr)
2245 error = -mlx4_en_set_ring_size(priv->dev, size,
2246 priv->prof->tx_ring_size);
2250 static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
2252 struct mlx4_en_priv *priv;
2257 size = priv->prof->tx_ring_size;
2258 error = sysctl_handle_int(oidp, &size, 0, req);
2259 if (error || !req->newptr)
2261 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
2267 static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
2269 struct mlx4_en_priv *priv;
2274 ppp = priv->prof->tx_ppp;
2275 error = sysctl_handle_int(oidp, &ppp, 0, req);
2276 if (error || !req->newptr)
2278 if (ppp > 0xff || ppp < 0)
2280 priv->prof->tx_ppp = ppp;
2281 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
2282 priv->rx_mb_size + ETHER_CRC_LEN,
2283 priv->prof->tx_pause,
2285 priv->prof->rx_pause,
2286 priv->prof->rx_ppp);
static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv = arg1;
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int ppp, error;

	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	/* See if we have to change the number of tx queues. */
	if (!ppp != !priv->prof->rx_ppp) {
		mutex_lock(&mdev->state_lock);
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(priv->dev);
		}
		mlx4_en_free_resources(priv);
		priv->prof->rx_ppp = ppp;
		error = -mlx4_en_alloc_resources(priv);
		if (error)
			en_err(priv, "Failed reallocating port resources\n");
		if (error == 0 && port_up) {
			error = -mlx4_en_start_port(priv->dev);
			if (error)
				en_err(priv, "Failed starting port\n");
		}
		mutex_unlock(&mdev->state_lock);
		return (error);
	}
	priv->prof->rx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	return (error);
}
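/*
 * Note the `!ppp != !priv->prof->rx_ppp' test above: `!' collapses each
 * value to 0 or 1, so the expensive stop/realloc/restart path is taken
 * only when per-priority pause toggles between disabled and enabled
 * (e.g. 0 -> 0x0f).  Changing one non-zero mask to another (e.g.
 * 0x0f -> 0xff) merely reprograms the port via mlx4_SET_PORT_general().
 */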
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
{
	struct net_device *dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *coal;
	struct sysctl_oid_list *coal_list;
	const char *pnameunit;

	dev = priv->dev;
	ctx = &priv->conf_ctx;
	pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);

	sysctl_ctx_init(ctx);
	priv->sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet");
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
	    "conf", CTLFLAG_RD, NULL, "Configuration");
	node_list = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
	    CTLFLAG_RW, &priv->msg_enable, 0,
	    "Driver message enable bitfield");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
	    CTLFLAG_RD, &priv->rx_ring_num, 0,
	    "Number of receive rings");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
	    CTLFLAG_RD, &priv->tx_ring_num, 0,
	    "Number of transmit rings");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ring_size, "I", "Receive ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
	    CTLFLAG_RD, &priv->port, 0,
	    "Port number");
	SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
	    CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
	    "PCI device name");
	/* Add coalescer configuration. */
	coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
	    "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
	coal_list = SYSCTL_CHILDREN(coal);
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
	    CTLFLAG_RW, &priv->pkt_rate_low, 0,
	    "Packets per-second for minimum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
	    CTLFLAG_RW, &priv->rx_usecs_low, 0,
	    "Minimum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
	    CTLFLAG_RW, &priv->pkt_rate_high, 0,
	    "Packets per-second for maximum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
	    CTLFLAG_RW, &priv->rx_usecs_high, 0,
	    "Maximum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
	    CTLFLAG_RW, &priv->sample_interval, 0,
	    "Adaptive coalescing sample interval in hz ticks");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
	    CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
	    "Enable adaptive RX coalescing");
}
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
	struct net_device *dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *ring_node;
	struct sysctl_oid_list *ring_list;
	struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_en_rx_ring *rx_ring;
	char namebuf[128];
	int i;

	dev = priv->dev;
	ctx = &priv->stat_ctx;
	sysctl_ctx_init(ctx);
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
	    "stat", CTLFLAG_RD, NULL, "Statistics");
	node_list = SYSCTL_CHILDREN(node);
#ifdef MLX4_EN_PERF_STAT
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
	    &priv->pstats.tx_poll, 0, "TX Poll calls");
	SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
	    &priv->pstats.tx_pktsz_avg, "TX average packet size");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
	    &priv->pstats.inflight_avg, 0, "TX average packets in-flight");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.tx_coal_avg, 0, "TX average coalesced completions");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.rx_coal_avg, 0, "RX average coalesced completions");
#endif
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &priv->port_stats.tso_packets, "TSO packets sent");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
	    &priv->port_stats.queue_stopped, "Queue full");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
	    &priv->port_stats.wake_queue, "Queue resumed after full");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
	    &priv->port_stats.tx_timeout, "Transmit timeouts");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
	    &priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_good, "RX checksum offload success");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_none, "RX without checksum offload");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload",
	    CTLFLAG_RD, &priv->port_stats.tx_chksum_offload,
	    "TX checksum offloads");
	/* Could strdup the names and add in a loop. This is simpler. */
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &priv->pkstats.rx_bytes, "RX Bytes");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_packets, "RX packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_multicast_packets, "RX Multicast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_broadcast_packets, "RX Broadcast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_errors, "RX Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
	    &priv->pkstats.rx_dropped, "RX Dropped");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_length_errors, "RX Length Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_over_errors, "RX Over Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_crc_errors, "RX CRC Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
	    &priv->pkstats.rx_jabbers, "RX Jabbers");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
	    &priv->pkstats.rx_in_range_length_error, "RX In Range Length Error");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error,
	    "RX Out Of Range Length Error");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_lt_64_bytes_packets, "RX Less Than 64 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_127_bytes_packets, "RX 127 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_255_bytes_packets, "RX 255 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_511_bytes_packets, "RX 511 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1023_bytes_packets, "RX 1023 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1518_bytes_packets, "RX 1518 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1522_bytes_packets, "RX 1522 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1548_bytes_packets, "RX 1548 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_gt_1548_bytes_packets,
	    "RX Greater Than 1548 Bytes Packets");
	/*
	 * The TX counters registered below mirror, in order, the TX half of
	 * struct mlx4_en_pkt_stats: tx_packets, tx_bytes,
	 * tx_multicast_packets, tx_broadcast_packets, tx_errors, tx_dropped,
	 * the tx_lt_64/127/255/511/1023/1518/1522/1548/gt_1548_bytes_packets
	 * size-histogram buckets, plus the
	 * rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS] and
	 * tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS] per-priority arrays
	 * (NUM_PKT_STATS == 72 entries in all).
	 */
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_packets, "TX packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &priv->pkstats.tx_bytes, "TX Bytes");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_multicast_packets, "TX Multicast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_broadcast_packets, "TX Broadcast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
	    &priv->pkstats.tx_errors, "TX Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
	    &priv->pkstats.tx_dropped, "TX Dropped");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_lt_64_bytes_packets, "TX Less Than 64 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_127_bytes_packets, "TX 127 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_255_bytes_packets, "TX 255 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_511_bytes_packets, "TX 511 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1023_bytes_packets, "TX 1023 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1518_bytes_packets, "TX 1518 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1522_bytes_packets, "TX 1522 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1548_bytes_packets, "TX 1548 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_gt_1548_bytes_packets,
	    "TX Greater Than 1548 Bytes Packets");
	for (i = 0; i < priv->tx_ring_num; i++) {
		tx_ring = priv->tx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "TX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &tx_ring->packets, "TX packets");
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		rx_ring = priv->rx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "RX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &rx_ring->packets, "RX packets");
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &rx_ring->bytes, "RX bytes");
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "error",
		    CTLFLAG_RD, &rx_ring->errors, "RX soft errors");
	}
}
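/*
 * Per-ring counters land under the "stat" subtree, one node per ring, so
 * they can be read from userland (sketch, assuming an interface named
 * mlxen0; values elided):
 *
 *	# sysctl hw.mlxen0.stat.rx_ring0
 *	hw.mlxen0.stat.rx_ring0.packets: ...
 *	hw.mlxen0.stat.rx_ring0.bytes: ...
 *	hw.mlxen0.stat.rx_ring0.error: ...
 */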