/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/compat.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include <linux/list.h>
#include <linux/if_ether.h>

#include <dev/mlx4/driver.h>
#include <dev/mlx4/device.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/cq.h>

#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include "en.h"
NETDUMP_DEFINE(mlx4_en);

static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (done)
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
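
/*
 * Worked example of the bucket computation above (the numbers are
 * illustrative, not taken from real traffic): with src_port = 0x1234,
 * dst_port = 0x0050 and src_ip == dst_ip (so the XOR term vanishes),
 * l = 0x1234 | (0x0050 << 2) = 0x1374.  hash_long() then folds l down
 * to MLX4_EN_FILTER_HASH_SHIFT bits, selecting one of the
 * 1 << MLX4_EN_FILTER_HASH_SHIFT buckets in priv->filter_hash.
 */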
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
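
/*
 * Summary of the RFS filter life cycle implemented above (editorial
 * note, not part of the original source): mlx4_en_filter_rfs() looks
 * up or allocates a filter and queues mlx4_en_filter_work(), which
 * attaches the flow steering rule in firmware.  Once the stack reports
 * that the flow may expire (rps_may_expire_flow()),
 * mlx4_en_filter_rfs_expire() moves the filter onto a private list and
 * mlx4_en_filter_free() detaches the rule and frees the memory.
 */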
static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id);
	return 0;
}
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       IF_LLADDR(priv->dev));
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       IF_LLADDR(priv->dev));
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       IF_LLADDR(priv->dev));
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
static void mlx4_en_clear_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *uc_to_del;

	list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
		list_del(&uc_to_del->list);
		kfree(uc_to_del);
	}
}
static void mlx4_en_cache_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifaddr *ifa;

	mlx4_en_clear_uclist(dev);

	if_addr_rlock(dev);
	CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->uc_list);
	}
	if_addr_runlock(dev);
}
static void mlx4_en_clear_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifmultiaddr *ifma;

	mlx4_en_clear_mclist(dev);

	if_maddr_rlock(dev);
	CK_STAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
	if_maddr_runlock(dev);
}
static void update_addr_list_flags(struct mlx4_en_priv *priv,
				   struct list_head *dst,
				   struct list_head *src)
{
	struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MLX4_ADDR_LIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MLX4_ADDR_LIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_addr_list));
			new_mc->action = MLX4_ADDR_LIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
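
/*
 * Illustrative example of the two-pass diff above (addresses are made
 * up): if dst holds {A, B} and src holds {B, C}, the first pass marks
 * A as MLX4_ADDR_LIST_REM (present in dst only), and the second pass
 * marks B as MLX4_ADDR_LIST_NONE (present in both) and appends a copy
 * of C to dst marked MLX4_ADDR_LIST_ADD.  The callers then walk dst
 * and detach or attach steering rules according to those actions.
 */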
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->if_flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(addr_list, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(addr_list->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);

		list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
			if (addr_list->action == MLX4_ADDR_LIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    addr_list->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (addr_list->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&addr_list->list);
				kfree(addr_list);
			} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &addr_list->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &addr_list->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}
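
/*
 * Note on the SET_MCAST_FLTR sequence above (editorial summary): the
 * hardware filter is first disabled, then flushed and re-written entry
 * by entry with MLX4_MCAST_CONFIG (broadcast first, then each cached
 * address), and only re-enabled with MLX4_MCAST_ENABLE once the table
 * is complete, so the port never runs on a half-written filter.
 */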
static void mlx4_en_do_unicast(struct mlx4_en_priv *priv,
			       struct net_device *dev,
			       struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	int err;

	/* Update unicast list */
	mlx4_en_cache_uclist(dev);

	update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);

	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		if (addr_list->action == MLX4_ADDR_LIST_REM) {
			mlx4_en_uc_steer_release(priv, addr_list->addr,
						 priv->rss_map.indir_qp.qpn,
						 addr_list->reg_id);
			/* remove from list */
			list_del(&addr_list->list);
			kfree(addr_list);
		} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
			err = mlx4_en_uc_steer_add(priv, addr_list->addr,
						   &priv->rss_map.indir_qp.qpn,
						   &addr_list->reg_id);
			if (err)
				en_err(priv, "Fail to add unicast address\n");
		}
	}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}
	if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
		if (priv->port_state.link_state) {
			priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
			/* update netif baudrate */
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			/* Important note: the following call for if_link_state_change
			 * is needed for interface up scenario (start port, link state
			 * change) */
			if_link_state_change(priv->dev, LINK_STATE_UP);
			en_dbg(HW, priv, "Link Up\n");
		}
	}

	/* Set unicast rules */
	mlx4_en_do_unicast(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->if_flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		/* Not in promiscuous mode */
		mlx4_en_clear_promisc_mode(priv, mdev);
	}

	/* Set multicast rules */
	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_watchdog_timeout(void *arg)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
	if (priv->port_up)
		callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
		    mlx4_en_watchdog_timeout, priv);
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
	       "rx_frames:%d rx_usecs:%d\n",
	       (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring;
	int err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a level at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
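
/*
 * Worked example of the linear interpolation above (values are
 * illustrative, not the driver defaults): with pkt_rate_low = 400000,
 * pkt_rate_high = 450000, rx_usecs_low = 0 and rx_usecs_high = 128, a
 * measured rate of 425000 pps gives
 *   moder_time = (425000 - 400000) * (128 - 0) / (450000 - 400000) + 0
 *              = 64 usecs,
 * i.e. the moderation time scales linearly between the two endpoints
 * as the packet rate moves from pkt_rate_low to pkt_rate_high.
 */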
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			if (mlx4_is_slave(mdev->dev))
				err = mlx4_en_get_vport_stats(mdev, priv->port);
			else
				err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			if_link_state_change(priv->dev, LINK_STATE_DOWN);
			/* update netif baudrate */
			priv->dev->if_baudrate = 0;

		/* make sure the port is up before notifying the OS.
		 * This is tricky since we get here on INIT_PORT and
		 * in such case we can't tell the OS the port is up.
		 * To solve this there is a call to if_link_state_change
		 * in set_rx_mode.
		 */
		} else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){
			if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
				en_info(priv, "Query port failed\n");
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			en_info(priv, "Link Up\n");
			if_link_state_change(priv->dev, LINK_STATE_UP);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->uc_list);
	INIT_LIST_HEAD(&priv->curr_mc_list);
	INIT_LIST_HEAD(&priv->curr_uc_list);
	INIT_LIST_HEAD(&priv->ethtool_list);

	/* Calculate Rx buf size */
	dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
	    mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];

		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed activating Tx ring %d\n", i);
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
		++tx_index;
	}
	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_mb_size,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}

	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	priv->port_up = true;

	/* Enable the queues. */
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	dev->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef CONFIG_DEBUG_FS
	mlx4_en_create_debug_files(priv);
#endif
	callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
	    mlx4_en_watchdog_timeout, priv);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}
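
/*
 * Bring-up order used by mlx4_en_start_port() above (editorial
 * summary): activate RX rings and CQs, reserve the base QP, configure
 * RSS steering and the drop QP, activate TX CQs and rings, then
 * configure the port (SET_PORT), run INIT_PORT, attach the broadcast
 * address and finally schedule the rx_mode task.  The error labels
 * unwind the same steps in reverse order.
 */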
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_addr_list *addr_list, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

#ifdef CONFIG_DEBUG_FS
	mlx4_en_delete_debug_files(priv);
#endif

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All unicasts */
	list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
		mlx4_en_uc_steer_release(priv, addr_list->addr,
					 priv->rss_map.indir_qp.qpn,
					 addr_list->reg_id);
	}
	mlx4_en_clear_uclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
		memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, addr_list->reg_id);
	}
	mlx4_en_clear_mclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}

	callout_stop(&priv->watchdog_timer);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_tx_ring *ring;
	int i;

	if (priv->blocked == 0 || priv->port_up == 0)
		return;
	for (i = 0; i < priv->tx_ring_num; i++) {
		int watchdog_time;

		ring = priv->tx_ring[i];
		watchdog_time = READ_ONCE(ring->watchdog_time);
		if (watchdog_time != 0 &&
		    time_after(ticks, ring->watchdog_time))
			goto reset;
	}
	return;

reset:
	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->oversized_packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}
static void mlx4_en_open(void* arg)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct net_device *dev;
	int err = 0;

	priv = arg;
	mdev = priv->mdev;
	dev = priv->dev;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (priv->dev->rx_cpu_rmap) {
		free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
		priv->dev->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->stat_sysctl != NULL)
		sysctl_ctx_free(&priv->stat_ctx);
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node = 0;

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, node))
			goto err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE, node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif
	/* Re-create stat sysctls in case the number of rings changed. */
	mlx4_en_sysctl_stat(priv);
	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	priv->port_up = false;
	return -ENOMEM;
}
struct en_port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
	ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

#define PORT_ATTR_RO(_name) \
struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

#define EN_PORT_ATTR(_name, _mode, _show, _store) \
struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	mutex_lock(&mdev->state_lock);
	mlx4_en_stop_port(dev);
	mutex_unlock(&mdev->state_lock);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		ether_ifdetach(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);
	callout_drain(&priv->watchdog_timer);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	/* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
	if (priv->conf_sysctl != NULL)
		sysctl_ctx_free(&priv->conf_ctx);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	kfree(priv);
	if_free(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
	       (unsigned)dev->if_mtu, (unsigned)new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
		       priv->max_mtu);
		return -EPERM;
	}
	mutex_lock(&mdev->state_lock);
	dev->if_mtu = new_mtu;
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
	}
	mutex_unlock(&mdev->state_lock);
	return 0;
}
static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
	int trans_type;
	int active;

	active = IFM_ETHER;
	if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
		return (active);
	active |= IFM_FDX;
	trans_type = priv->port_state.transceiver;
	/* XXX I don't know all of the transceiver values. */
	switch (priv->port_state.link_speed) {
	case 100:
		active |= IFM_100_T;
		break;
	case 1000:
		active |= IFM_1000_T;
		break;
	case 10000:
		if (trans_type > 0 && trans_type <= 0xC)
			active |= IFM_10G_SR;
		else if (trans_type == 0x80 || trans_type == 0)
			active |= IFM_10G_CX4;
		break;
	case 40000:
		active |= IFM_40G_CR4;
		break;
	}
	if (priv->prof->tx_pause)
		active |= IFM_ETH_TXPAUSE;
	if (priv->prof->rx_pause)
		active |= IFM_ETH_RXPAUSE;

	return (active);
}
static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx4_en_priv *priv;

	priv = dev->if_softc;
	ifmr->ifm_status = IFM_AVALID;
	if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
		ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = mlx4_en_calc_media(priv);
}
static int mlx4_en_media_change(struct ifnet *dev)
{
	struct mlx4_en_priv *priv;
	struct ifmedia *ifm;
	int rxpause;
	int txpause;
	int error;

	priv = dev->if_softc;
	ifm = &priv->media;
	rxpause = txpause = 0;
	error = 0;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_1000_T:
	case IFM_40G_CR4:
		if ((IFM_SUBTYPE(ifm->ifm_media)
			== IFM_SUBTYPE(mlx4_en_calc_media(priv)))
			&& (ifm->ifm_media & IFM_FDX))
			break;
		/* Fallthrough */
	default:
		printf("%s: Only auto media type\n", if_name(dev));
		return (EINVAL);
	}
	/* Allow user to set/clear pause */
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
		rxpause = 1;
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
		txpause = 1;
	if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
		priv->prof->tx_pause = txpause;
		priv->prof->rx_pause = rxpause;
		error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
		     priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
		     priv->prof->tx_ppp, priv->prof->rx_pause,
		     priv->prof->rx_ppp);
	}
	return (error);
}
static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct ifreq *ifr;
	int error;
	int mask;
	struct ifrsskey *ifrk;
	const u32 *key;
	struct ifrsshash *ifrh;
	u8 rss_mask;

	error = 0;
	mask = 0;
	priv = dev->if_softc;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	mdev = priv->mdev;
	ifr = (struct ifreq *) data;

	switch (command) {
	case SIOCSIFMTU:
		error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		if (dev->if_flags & IFF_UP) {
			if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				mutex_lock(&mdev->state_lock);
				mlx4_en_start_port(dev);
				mutex_unlock(&mdev->state_lock);
			} else {
				mlx4_en_set_rx_mode(dev);
			}
		} else {
			mutex_lock(&mdev->state_lock);
			if (dev->if_drv_flags & IFF_DRV_RUNNING) {
				mlx4_en_stop_port(dev);
				if_link_state_change(dev, LINK_STATE_DOWN);
			}
			mutex_unlock(&mdev->state_lock);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mlx4_en_set_rx_mode(dev);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(dev, ifr, &priv->media, command);
		break;
	case SIOCSIFCAP:
		mutex_lock(&mdev->state_lock);
		mask = ifr->ifr_reqcap ^ dev->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			dev->if_capenable ^= IFCAP_TXCSUM;
			dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & dev->if_capenable &&
			    !(IFCAP_TXCSUM & dev->if_capenable)) {
				dev->if_capenable &= ~IFCAP_TSO4;
				dev->if_hwassist &= ~CSUM_IP_TSO;
				if_printf(dev,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
			dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & dev->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
				dev->if_capenable &= ~IFCAP_TSO6;
				dev->if_hwassist &= ~CSUM_IP6_TSO;
				if_printf(dev,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			dev->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			dev->if_capenable ^= IFCAP_RXCSUM_IPV6;

		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & dev->if_capenable) &&
			    !(IFCAP_TXCSUM & dev->if_capenable)) {
				if_printf(dev, "enable txcsum first.\n");
				error = EAGAIN;
				goto out;
			}
			dev->if_capenable ^= IFCAP_TSO4;
			dev->if_hwassist ^= CSUM_IP_TSO;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & dev->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
				if_printf(dev, "enable txcsum6 first.\n");
				error = EAGAIN;
				goto out;
			}
			dev->if_capenable ^= IFCAP_TSO6;
			dev->if_hwassist ^= CSUM_IP6_TSO;
		}
		if (mask & IFCAP_LRO)
			dev->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_WOL_MAGIC)
			dev->if_capenable ^= IFCAP_WOL_MAGIC;
		if (dev->if_drv_flags & IFF_DRV_RUNNING)
			mlx4_en_start_port(dev);
out:
		mutex_unlock(&mdev->state_lock);
		VLAN_CAPABILITIES(dev);
		break;
#if __FreeBSD_version >= 1100036
	case SIOCGI2C: {
		struct ifi2creq i2c;

		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (error)
			break;
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}
		/*
		 * Note that we ignore i2c.addr here. The driver hardcodes
		 * the address to 0x50, while standard expects it to be 0xA0.
		 */
		error = mlx4_get_module_info(mdev->dev, priv->port,
		    i2c.offset, i2c.len, i2c.data);
		if (error < 0) {
			error = -error;
			break;
		}
		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;
	}
#endif
	case SIOCGIFRSSKEY:
		ifrk = (struct ifrsskey *)data;
		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
		mutex_lock(&mdev->state_lock);
		key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen);
		if (ifrk->ifrk_keylen > RSS_KEYLEN)
			error = EINVAL;
		else
			memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen);
		mutex_unlock(&mdev->state_lock);
		break;

	case SIOCGIFRSSHASH:
		mutex_lock(&mdev->state_lock);
		rss_mask = mlx4_en_get_rss_mask(priv);
		mutex_unlock(&mdev->state_lock);
		ifrh = (struct ifrsshash *)data;
		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
		ifrh->ifrh_types = 0;
		if (rss_mask & MLX4_RSS_IPV4)
			ifrh->ifrh_types |= RSS_TYPE_IPV4;
		if (rss_mask & MLX4_RSS_TCP_IPV4)
			ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4;
		if (rss_mask & MLX4_RSS_IPV6)
			ifrh->ifrh_types |= RSS_TYPE_IPV6;
		if (rss_mask & MLX4_RSS_TCP_IPV6)
			ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6;
		if (rss_mask & MLX4_RSS_UDP_IPV4)
			ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4;
		if (rss_mask & MLX4_RSS_UDP_IPV6)
			ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6;
		break;

	default:
		error = ether_ioctl(dev, command, data);
		break;
	}

	return (error);
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	uint8_t dev_addr[ETHER_ADDR_LEN];
	int err;
	int i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	dev = priv->dev = if_alloc(IFT_ETHER);
	if (dev == NULL) {
		en_err(priv, "Net device allocation failed\n");
		kfree(priv);
		return -ENOMEM;
	}
	dev->if_softc = priv;
	if_initname(dev, "mlxen", (device_get_unit(
	    mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1);
	dev->if_mtu = ETHERMTU;
	dev->if_init = mlx4_en_open;
	dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	dev->if_ioctl = mlx4_en_ioctl;
	dev->if_transmit = mlx4_en_transmit;
	dev->if_qflush = mlx4_en_qflush;
	dev->if_snd.ifq_maxlen = prof->tx_ring_size;

	/*
	 * Initialize driver private data
	 */
	priv->counter_index = 0xff;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
	callout_init(&priv->watchdog_timer, 1);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;

	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kcalloc(MAX_TX_RINGS,
				sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->last_ifq_jiffies = 0;
	priv->if_counters_rx_errors = 0;
	priv->if_counters_rx_no_buffer = 0;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		priv->dcbx_cap = DCB_CAP_DCBX_HOST;
		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "QoS disabled - no HW support\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];

	if (ILLEGAL_MAC(priv->mac)) {
#if BITS_PER_LONG == 64
		en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
		       priv->port, priv->mac);
#elif BITS_PER_LONG == 32
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
#endif
		err = -EINVAL;
		goto out;
	}

	mlx4_en_sysctl_conf(priv);
2239 err = mlx4_en_alloc_resources(priv);
2243 /* Allocate page for receive rings */
2244 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2245 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2247 en_err(priv, "Failed to allocate page for rx qps\n");
2250 priv->allocated = 1;
2253 * Set driver features
2255 dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
2256 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
2257 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
2258 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
2259 dev->if_capabilities |= IFCAP_LRO;
2260 dev->if_capabilities |= IFCAP_HWSTATS;
2262 if (mdev->LSO_support)
2263 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;
2265 #if __FreeBSD_version >= 1100000
2266 /* set TSO limits so that we don't have to drop TX packets */
2267 dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
2268 dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
2269 dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
2272 dev->if_capenable = dev->if_capabilities;
2274 dev->if_hwassist = 0;
2275 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
2276 dev->if_hwassist |= CSUM_TSO;
2277 if (dev->if_capenable & IFCAP_TXCSUM)
2278 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2279 if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
2280 dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	mdev->pndev[priv->port] = dev;

	priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
	mlx4_en_set_default_moderation(priv);

	/* Set default MAC */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));

	ether_ifattach(dev, dev_addr);
	if_link_state_change(dev, LINK_STATE_DOWN);
	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx4_en_media_change, mlx4_en_media_status);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);

	NETDUMP_SET(dev, mlx4_en);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	priv->registered = 1;

	/* Configure port */
	priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
	    priv->rx_mb_size,
	    prof->tx_pause, prof->tx_ppp,
	    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		    "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}

	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
		    SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
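
/*
 * Resize the RX/TX rings. The requested sizes are rounded up to a power
 * of two and clamped to the supported range; if the port is up it is
 * stopped, resources are reallocated, and the port is restarted.
 */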
static int mlx4_en_set_ring_size(struct net_device *dev,
    int rx_size, int tx_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	rx_size = roundup_pow_of_two(rx_size);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(tx_size);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ?
	    priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return (0);
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}
	mlx4_en_free_resources(priv);
	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;
	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
	return (err);
}
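
/*
 * Sysctl handlers for hw.<ifname>.conf.rx_size and tx_size. Each reads
 * the current profile value and, on a write, funnels the new size
 * through mlx4_en_set_ring_size() above.
 */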
static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size;
	int error;

	priv = arg1;
	size = priv->prof->rx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, size,
	    priv->prof->tx_ring_size);
	return (error);
}

static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size;
	int error;

	priv = arg1;
	size = priv->prof->tx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
	    size);
	return (error);
}
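
/*
 * Query the type and EEPROM length of the attached cable module
 * (QSFP/QSFP+/QSFP28/SFP) by reading its identifier and revision bytes.
 */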
static int mlx4_en_get_module_info(struct net_device *dev,
    struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;
	u8 data[4];

	/* Read first 2 bytes to get Module & REV ID */
	ret = mlx4_get_module_info(mdev->dev, priv->port,
	    0/*offset*/, 2/*size*/, data);
	if (ret < 2) {
		en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
		return (-EIO);
	}

	switch (data[0] /* identifier */) {
	case MLX4_MODULE_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLX4_MODULE_ID_QSFP_PLUS:
		if (data[1] >= 0x3) { /* revision id */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLX4_MODULE_ID_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	case MLX4_MODULE_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		en_err(priv, "mlx4_en_get_module_info: unrecognized cable type\n");
		return (-EINVAL);
	}

	return (0);
}
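
/*
 * Read the module EEPROM contents into 'data', looping because each
 * mlx4_get_module_info() call may return fewer bytes than requested.
 */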
static int mlx4_en_get_module_eeprom(struct net_device *dev,
    struct ethtool_eeprom *ee, u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return (-EINVAL);
	memset(data, 0, ee->len);
	while (i < ee->len) {
		en_dbg(DRV, priv,
		    "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		    i, offset, ee->len - i);
		ret = mlx4_get_module_info(mdev->dev, priv->port,
		    offset, ee->len - i, data + i);
		if (!ret) /* Done reading */
			return (0);
		if (ret < 0) {
			en_err(priv,
			    "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			    i, offset, ee->len - i, ret);
			return (-1);
		}
		i += ret;
		offset += ret;
	}
	return (0);
}
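
/* Hex-dump 'len' bytes of EEPROM data to the console, 16 bytes per row. */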
static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	int i, j = 0, row = 0;
	const int NUM_OF_BYTES = 16;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	while (row < len) {
		printf("0x%04x\t\t", row);
		/* Stop at 'len' so a partial last row does not overrun. */
		for (i = 0; i < NUM_OF_BYTES && row < len; i++) {
			printf("%02x ", data[j]);
			j++;
			row++;
		}
		printf("\n");
	}
}

/* Read cable EEPROM module information by first inspecting the first
 * two bytes to get the length and then read the rest of the information.
 * The information is printed to dmesg. */
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
	u8 *data;
	int error, result = 0;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct ethtool_modinfo modinfo;
	struct ethtool_eeprom ee;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		priv = arg1;
		dev = priv->dev;
		data = kmalloc(PAGE_SIZE, GFP_KERNEL);

		error = mlx4_en_get_module_info(dev, &modinfo);
		if (error) {
			en_err(priv,
			    "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
			    -error);
			goto out;
		}
		ee.len = modinfo.eeprom_len;
		ee.offset = 0;
		error = mlx4_en_get_module_eeprom(dev, &ee, data);
		if (error) {
			en_err(priv,
			    "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
			    -error);
			/* Continue printing partial information in case of an error */
		}
		/* EEPROM information will be printed in dmesg */
		mlx4_en_print_eeprom(data, ee.len);
out:
		kfree(data);
	}
	/* Return zero to prevent sysctl failure. */
	return (0);
}
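
/*
 * Sysctl handlers for the TX/RX per-priority pause (PPP) bitmaps.
 * Values are 8-bit masks; changing the RX mask between zero and
 * non-zero requires reallocating port resources.
 */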
static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int ppp;
	int error;

	priv = arg1;
	ppp = priv->prof->tx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	priv->prof->tx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	return (error);
}

static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	int ppp;
	int error;
	int port_up;

	port_up = 0;
	priv = arg1;
	mdev = priv->mdev;
	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	/* See if we have to change the number of tx queues. */
	if (!ppp != !priv->prof->rx_ppp) {
		mutex_lock(&mdev->state_lock);
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(priv->dev);
		}
		mlx4_en_free_resources(priv);
		priv->prof->rx_ppp = ppp;
		error = -mlx4_en_alloc_resources(priv);
		if (error)
			en_err(priv, "Failed reallocating port resources\n");
		if (error == 0 && port_up) {
			error = -mlx4_en_start_port(priv->dev);
			if (error)
				en_err(priv, "Failed starting port\n");
		}
		mutex_unlock(&mdev->state_lock);
		return (error);
	}
	priv->prof->rx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	return (error);
}
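
/* Create the hw.<ifname>.conf sysctl tree for driver configuration. */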
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
{
	struct net_device *dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *coal;
	struct sysctl_oid_list *coal_list;
	const char *pnameunit;

	dev = priv->dev;
	ctx = &priv->conf_ctx;
	pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);

	sysctl_ctx_init(ctx);
	priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet");
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl),
	    OID_AUTO, "conf", CTLFLAG_RD, NULL, "Configuration");
	node_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
	    CTLFLAG_RW, &priv->msg_enable, 0,
	    "Driver message enable bitfield");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
	    CTLFLAG_RD, &priv->rx_ring_num, 0,
	    "Number of receive rings");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
	    CTLFLAG_RD, &priv->tx_ring_num, 0,
	    "Number of transmit rings");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ring_size, "I", "Receive ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
	    CTLFLAG_RD, &priv->port, 0,
	    "Port Number");
	SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
	    CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
	    "PCI device name");

	/* Add coalescer configuration. */
	coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
	    "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
	coal_list = SYSCTL_CHILDREN(coal);
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
	    CTLFLAG_RW, &priv->pkt_rate_low, 0,
	    "Packets per-second for minimum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
	    CTLFLAG_RW, &priv->rx_usecs_low, 0,
	    "Minimum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
	    CTLFLAG_RW, &priv->pkt_rate_high, 0,
	    "Packets per-second for maximum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
	    CTLFLAG_RW, &priv->rx_usecs_high, 0,
	    "Maximum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
	    CTLFLAG_RW, &priv->sample_interval, 0,
	    "Adaptive moderation sampling interval in units of HZ ticks");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
	    CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
	    "Enable adaptive rx coalescing");
	/* EEPROM support */
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_read_eeprom, "I", "EEPROM information");
}
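
/*
 * Create the hw.<ifname>.stat sysctl tree: read-only per-port and
 * per-ring counters.
 */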
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *ring_node;
	struct sysctl_oid_list *ring_list;
	struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_en_rx_ring *rx_ring;
	char namebuf[128];
	int i;

	ctx = &priv->stat_ctx;
	sysctl_ctx_init(ctx);
	priv->stat_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
	    "stat", CTLFLAG_RD, NULL, "Statistics");
	node_list = SYSCTL_CHILDREN(priv->stat_sysctl);

#ifdef MLX4_EN_PERF_STAT
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
	    &priv->pstats.tx_poll, 0, "TX Poll calls");
	SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
	    &priv->pstats.tx_pktsz_avg, "TX average packet size");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
	    &priv->pstats.inflight_avg, 0, "TX average packets in-flight");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.tx_coal_avg, 0, "TX average coalesced completions");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.rx_coal_avg, 0, "RX average coalesced completions");
#endif

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &priv->port_stats.tso_packets, 0, "TSO packets sent");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
	    &priv->port_stats.queue_stopped, 0, "Queue full");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
	    &priv->port_stats.wake_queue, 0, "Queue resumed after full");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
	    &priv->port_stats.tx_timeout, 0, "Transmit timeouts");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
	    &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
	    &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
	    CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
	    "TX checksum offloads");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
	    CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
	    "Oversized chains defragged");

	/* Could strdup the names and add in a loop. This is simpler. */
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &priv->pkstats.rx_bytes, 0, "RX Bytes");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_packets, 0, "RX Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_errors, 0, "RX Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
	    &priv->pkstats.rx_dropped, 0, "RX Dropped");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_length_errors, 0, "RX Length Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_over_errors, 0, "RX Over Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
	    &priv->pkstats.rx_jabbers, 0, "RX Jabbers");

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
	    &priv->pkstats.rx_in_range_length_error, 0, "RX In-Range Length Error");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
	    "RX Out-Of-Range Length Error");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Less Than 64 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_gt_1548_bytes_packets, 0,
	    "RX Greater Than 1548 Bytes Packets");

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_packets, 0, "TX Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &priv->pkstats.tx_bytes, 0, "TX Bytes");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
	    &priv->pkstats.tx_errors, 0, "TX Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
	    &priv->pkstats.tx_dropped, 0, "TX Dropped");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Than 64 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_gt_1548_bytes_packets, 0,
	    "TX Greater Than 1548 Bytes Packets");

	for (i = 0; i < priv->tx_ring_num; i++) {
		tx_ring = priv->tx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "TX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &tx_ring->packets, 0, "TX packets");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets",
		    CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts",
		    CTLFLAG_RD, &tx_ring->defrag_attempts, 0,
		    "Oversized chains defragged");
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		rx_ring = priv->rx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "RX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &rx_ring->packets, 0, "RX packets");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error",
		    CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors");
	}
}
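
/*
 * Netdump hooks: allow dumping a kernel core over this interface after
 * a panic. Registered earlier via NETDUMP_SET(dev, mlx4_en).
 */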
#ifdef NETDUMP
static void
mlx4_en_netdump_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize)
{
	struct mlx4_en_priv *priv;

	priv = if_getsoftc(dev);
	mutex_lock(&priv->mdev->state_lock);
	*nrxr = priv->rx_ring_num;
	*ncl = NETDUMP_MAX_IN_FLIGHT;
	*clsize = priv->rx_mb_size;
	mutex_unlock(&priv->mdev->state_lock);
}

static void
mlx4_en_netdump_event(struct ifnet *dev, enum netdump_ev event)
{
	/* Nothing to do: the driver needs no special netdump event handling. */
}

static int
mlx4_en_netdump_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv;
	int err;

	priv = if_getsoftc(dev);
	if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || !priv->link_state)
		return (ENOENT);

	err = mlx4_en_xmit(priv, 0, &m);
	if (err != 0 && m != NULL)
		m_freem(m);
	return (err);
}

static int
mlx4_en_netdump_poll(struct ifnet *dev, int count)
{
	struct mlx4_en_priv *priv;

	priv = if_getsoftc(dev);
	if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state)
		return (ENOENT);

	mlx4_poll_interrupts(priv->mdev->dev);
	return (0);
}
#endif /* NETDUMP */