2 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/etherdevice.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/compat.h>
38 #ifdef CONFIG_NET_RX_BUSY_POLL
39 #include <net/busy_poll.h>
42 #include <linux/list.h>
43 #include <linux/if_ether.h>
45 #include <dev/mlx4/driver.h>
46 #include <dev/mlx4/device.h>
47 #include <dev/mlx4/cmd.h>
48 #include <dev/mlx4/cq.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
56 NETDUMP_DEFINE(mlx4_en);
58 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
59 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
61 #ifdef CONFIG_NET_RX_BUSY_POLL
62 /* must be called with local_bh_disable()d */
63 static int mlx4_en_low_latency_recv(struct napi_struct *napi)
65 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
66 struct net_device *dev = cq->dev;
67 struct mlx4_en_priv *priv = netdev_priv(dev);
68 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
72 return LL_FLUSH_FAILED;
74 if (!mlx4_en_cq_lock_poll(cq))
77 done = mlx4_en_process_rx_cq(dev, cq, 4);
78 #ifdef LL_EXTENDED_STATS
80 rx_ring->cleaned += done;
85 mlx4_en_cq_unlock_poll(cq);
89 #endif /* CONFIG_NET_RX_BUSY_POLL */
91 #ifdef CONFIG_RFS_ACCEL
93 struct mlx4_en_filter {
94 struct list_head next;
95 struct work_struct work;
104 struct mlx4_en_priv *priv;
105 u32 flow_id; /* RFS infrastructure id */
106 int id; /* mlx4_en driver id */
107 u64 reg_id; /* Flow steering API id */
108 u8 activated; /* Used to prevent expiry before filter
111 struct hlist_node filter_chain;
114 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
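/*
 * Map an IP protocol number to the matching flow steering rule id;
 * protocols other than TCP/UDP map to MLX4_NET_TRANS_RULE_NUM (unsupported).
 */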
116 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
120 return MLX4_NET_TRANS_RULE_ID_UDP;
122 return MLX4_NET_TRANS_RULE_ID_TCP;
124 return MLX4_NET_TRANS_RULE_NUM;
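/*
 * Deferred work that programs one RFS filter into hardware: builds
 * ETH + IPv4 + TCP/UDP spec lists for a device-managed steering rule
 * targeting the QP of the filter's RX ring, detaches any previously
 * attached rule and attaches the new one.
 */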
128 static void mlx4_en_filter_work(struct work_struct *work)
130 struct mlx4_en_filter *filter = container_of(work,
131 struct mlx4_en_filter,
133 struct mlx4_en_priv *priv = filter->priv;
134 struct mlx4_spec_list spec_tcp_udp = {
135 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
138 .dst_port = filter->dst_port,
139 .dst_port_msk = (__force __be16)-1,
140 .src_port = filter->src_port,
141 .src_port_msk = (__force __be16)-1,
145 struct mlx4_spec_list spec_ip = {
146 .id = MLX4_NET_TRANS_RULE_ID_IPV4,
149 .dst_ip = filter->dst_ip,
150 .dst_ip_msk = (__force __be32)-1,
151 .src_ip = filter->src_ip,
152 .src_ip_msk = (__force __be32)-1,
156 struct mlx4_spec_list spec_eth = {
157 .id = MLX4_NET_TRANS_RULE_ID_ETH,
159 struct mlx4_net_trans_rule rule = {
160 .list = LIST_HEAD_INIT(rule.list),
161 .queue_mode = MLX4_NET_TRANS_Q_LIFO,
164 .promisc_mode = MLX4_FS_REGULAR,
166 .priority = MLX4_DOMAIN_RFS,
169 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
171 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
172 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
176 list_add_tail(&spec_eth.list, &rule.list);
177 list_add_tail(&spec_ip.list, &rule.list);
178 list_add_tail(&spec_tcp_udp.list, &rule.list);
180 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
181 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
182 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
184 filter->activated = 0;
186 if (filter->reg_id) {
187 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
188 if (rc && rc != -ENOENT)
189 en_err(priv, "Error detaching flow. rc = %d\n", rc);
192 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
194 en_err(priv, "Error attaching flow. err = %d\n", rc);
197 mlx4_en_filter_rfs_expire(priv);
199 filter->activated = 1;
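/* Hash the source/destination ports and IP addresses into a filter hash bucket. */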
202 static inline struct hlist_head *
203 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
204 __be16 src_port, __be16 dst_port)
209 l = (__force unsigned long)src_port |
210 ((__force unsigned long)dst_port << 2);
211 l ^= (__force unsigned long)(src_ip ^ dst_ip);
213 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
215 return &priv->filter_hash[bucket_idx];
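/*
 * Allocate and initialize a new RFS filter entry for the given flow and
 * link it onto the per-port filter list and hash table.
 */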
218 static struct mlx4_en_filter *
219 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
220 __be32 dst_ip, u8 ip_proto, __be16 src_port,
221 __be16 dst_port, u32 flow_id)
223 struct mlx4_en_filter *filter = NULL;
225 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
230 filter->rxq_index = rxq_index;
231 INIT_WORK(&filter->work, mlx4_en_filter_work);
233 filter->src_ip = src_ip;
234 filter->dst_ip = dst_ip;
235 filter->ip_proto = ip_proto;
236 filter->src_port = src_port;
237 filter->dst_port = dst_port;
239 filter->flow_id = flow_id;
241 filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
243 list_add_tail(&filter->next, &priv->filters);
244 hlist_add_head(&filter->filter_chain,
245 filter_hash_bucket(priv, src_ip, dst_ip, src_port,
251 static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
253 struct mlx4_en_priv *priv = filter->priv;
256 list_del(&filter->next);
258 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
259 if (rc && rc != -ENOENT)
260 en_err(priv, "Error detaching flow. rc = %d\n", rc);
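/* Look up an existing filter matching the given flow in its hash bucket. */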
265 static inline struct mlx4_en_filter *
266 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
267 u8 ip_proto, __be16 src_port, __be16 dst_port)
269 struct mlx4_en_filter *filter;
270 struct mlx4_en_filter *ret = NULL;
272 hlist_for_each_entry(filter,
273 filter_hash_bucket(priv, src_ip, dst_ip,
276 if (filter->src_ip == src_ip &&
277 filter->dst_ip == dst_ip &&
278 filter->ip_proto == ip_proto &&
279 filter->src_port == src_port &&
280 filter->dst_port == dst_port) {
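/*
 * RFS flow steering entry point: parse the IPv4 TCP/UDP headers of the
 * received packet and either retarget an existing filter to rxq_index or
 * allocate a new one and queue its programming work.
 */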
290 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
291 u16 rxq_index, u32 flow_id)
293 struct mlx4_en_priv *priv = netdev_priv(net_dev);
294 struct mlx4_en_filter *filter;
295 const struct iphdr *ip;
302 int nhoff = skb_network_offset(skb);
305 if (skb->protocol != htons(ETH_P_IP))
306 return -EPROTONOSUPPORT;
308 ip = (const struct iphdr *)(skb->data + nhoff);
309 if (ip_is_fragment(ip))
310 return -EPROTONOSUPPORT;
312 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
313 return -EPROTONOSUPPORT;
314 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
316 ip_proto = ip->protocol;
322 spin_lock_bh(&priv->filters_lock);
323 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
326 if (filter->rxq_index == rxq_index)
329 filter->rxq_index = rxq_index;
331 filter = mlx4_en_filter_alloc(priv, rxq_index,
332 src_ip, dst_ip, ip_proto,
333 src_port, dst_port, flow_id);
340 queue_work(priv->mdev->workqueue, &filter->work);
345 spin_unlock_bh(&priv->filters_lock);
350 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
352 struct mlx4_en_filter *filter, *tmp;
355 spin_lock_bh(&priv->filters_lock);
356 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
357 list_move(&filter->next, &del_list);
358 hlist_del(&filter->filter_chain);
360 spin_unlock_bh(&priv->filters_lock);
362 list_for_each_entry_safe(filter, tmp, &del_list, next) {
363 cancel_work_sync(&filter->work);
364 mlx4_en_filter_free(filter);
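/*
 * Scan up to MLX4_EN_FILTER_EXPIRY_QUOTA filters and free those that the
 * RPS core reports as expirable; the list is rotated so that the next scan
 * resumes after the last filter inspected.
 */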
368 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
370 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
374 spin_lock_bh(&priv->filters_lock);
375 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
376 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
379 if (filter->activated &&
380 !work_pending(&filter->work) &&
381 rps_may_expire_flow(priv->dev,
382 filter->rxq_index, filter->flow_id,
384 list_move(&filter->next, &del_list);
385 hlist_del(&filter->filter_chain);
387 last_filter = filter;
392 if (last_filter && (&last_filter->next != priv->filters.next))
393 list_move(&priv->filters, &last_filter->next);
395 spin_unlock_bh(&priv->filters_lock);
397 list_for_each_entry_safe(filter, tmp, &del_list, next)
398 mlx4_en_filter_free(filter);
402 static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
404 struct mlx4_en_priv *priv = netdev_priv(dev);
405 struct mlx4_en_dev *mdev = priv->mdev;
412 en_dbg(HW, priv, "adding VLAN:%d\n", vid);
414 set_bit(vid, priv->active_vlans);
416 /* Add VID to port VLAN filter */
417 mutex_lock(&mdev->state_lock);
418 if (mdev->device_up && priv->port_up) {
419 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
421 en_err(priv, "Failed configuring VLAN filter\n");
423 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
424 en_dbg(HW, priv, "failed adding vlan %d\n", vid);
425 mutex_unlock(&mdev->state_lock);
429 static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
431 struct mlx4_en_priv *priv = netdev_priv(dev);
432 struct mlx4_en_dev *mdev = priv->mdev;
438 en_dbg(HW, priv, "Killing VID:%d\n", vid);
440 clear_bit(vid, priv->active_vlans);
442 /* Remove VID from port VLAN filter */
443 mutex_lock(&mdev->state_lock);
444 mlx4_unregister_vlan(mdev->dev, priv->port, vid);
446 if (mdev->device_up && priv->port_up) {
447 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
449 en_err(priv, "Failed configuring VLAN filter\n");
451 mutex_unlock(&mdev->state_lock);
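/*
 * Add a VXLAN tunnel steering rule for the given MAC unless the device is
 * not in VXLAN tunnel offload mode or is using static A0 DMFS steering.
 */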
455 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
456 int qpn, u64 *reg_id)
460 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
461 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
462 return 0; /* do nothing */
464 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
465 MLX4_DOMAIN_NIC, reg_id);
467 en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
470 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id);
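/*
 * Attach a unicast MAC to the given QP: via mlx4_unicast_attach() in B0
 * steering mode, or via a device-managed ETH flow rule (returned in reg_id)
 * in DMFS mode.
 */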
474 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
475 unsigned char *mac, int *qpn, u64 *reg_id)
477 struct mlx4_en_dev *mdev = priv->mdev;
478 struct mlx4_dev *dev = mdev->dev;
481 switch (dev->caps.steering_mode) {
482 case MLX4_STEERING_MODE_B0: {
487 memcpy(&gid[10], mac, ETH_ALEN);
490 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
493 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
494 struct mlx4_spec_list spec_eth = { {NULL} };
495 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
497 struct mlx4_net_trans_rule rule = {
498 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
501 .promisc_mode = MLX4_FS_REGULAR,
502 .priority = MLX4_DOMAIN_NIC,
505 rule.port = priv->port;
507 INIT_LIST_HEAD(&rule.list);
509 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
510 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
511 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
512 list_add_tail(&spec_eth.list, &rule.list);
514 err = mlx4_flow_attach(dev, &rule, reg_id);
521 en_warn(priv, "Failed Attaching Unicast\n");
526 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
527 unsigned char *mac, int qpn, u64 reg_id)
529 struct mlx4_en_dev *mdev = priv->mdev;
530 struct mlx4_dev *dev = mdev->dev;
532 switch (dev->caps.steering_mode) {
533 case MLX4_STEERING_MODE_B0: {
538 memcpy(&gid[10], mac, ETH_ALEN);
541 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
544 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
545 mlx4_flow_detach(dev, reg_id);
549 en_err(priv, "Invalid steering mode.\n");
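/*
 * Register the interface MAC with the port and obtain the base QP number:
 * in A0 steering mode the QP is derived from the port's base QPN and the
 * MAC table index, otherwise a QP range is reserved.
 */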
553 static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
555 struct mlx4_en_dev *mdev = priv->mdev;
556 struct mlx4_dev *dev = mdev->dev;
559 int *qpn = &priv->base_qpn;
560 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
562 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
563 IF_LLADDR(priv->dev));
564 index = mlx4_register_mac(dev, priv->port, mac);
567 en_err(priv, "Failed adding MAC: %pM\n",
568 IF_LLADDR(priv->dev));
572 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
573 int base_qpn = mlx4_get_base_qpn(dev, priv->port);
574 *qpn = base_qpn + index;
578 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
579 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
581 en_err(priv, "Failed to reserve qp for mac registration\n");
582 mlx4_unregister_mac(dev, priv->port, mac);
589 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
591 struct mlx4_en_dev *mdev = priv->mdev;
592 struct mlx4_dev *dev = mdev->dev;
593 int qpn = priv->base_qpn;
595 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
596 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
597 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
598 IF_LLADDR(priv->dev));
599 mlx4_unregister_mac(dev, priv->port, mac);
601 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
603 mlx4_qp_release_range(dev, qpn, 1);
604 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
608 static void mlx4_en_clear_uclist(struct net_device *dev)
610 struct mlx4_en_priv *priv = netdev_priv(dev);
611 struct mlx4_en_addr_list *tmp, *uc_to_del;
613 list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
614 list_del(&uc_to_del->list);
619 static void mlx4_en_cache_uclist(struct net_device *dev)
621 struct mlx4_en_priv *priv = netdev_priv(dev);
622 struct mlx4_en_addr_list *tmp;
625 mlx4_en_clear_uclist(dev);
628 CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
629 if (ifa->ifa_addr->sa_family != AF_LINK)
631 if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen !=
634 tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
636 en_err(priv, "Failed to allocate address list\n");
640 LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN);
641 list_add_tail(&tmp->list, &priv->uc_list);
643 if_addr_runlock(dev);
646 static void mlx4_en_clear_mclist(struct net_device *dev)
648 struct mlx4_en_priv *priv = netdev_priv(dev);
649 struct mlx4_en_addr_list *tmp, *mc_to_del;
651 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
652 list_del(&mc_to_del->list);
657 static void mlx4_en_cache_mclist(struct net_device *dev)
659 struct mlx4_en_priv *priv = netdev_priv(dev);
660 struct mlx4_en_addr_list *tmp;
661 struct ifmultiaddr *ifma;
663 mlx4_en_clear_mclist(dev);
666 CK_STAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
667 if (ifma->ifma_addr->sa_family != AF_LINK)
669 if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
672 tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
674 en_err(priv, "Failed to allocate address list\n");
678 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
679 list_add_tail(&tmp->list, &priv->mc_list);
681 if_maddr_runlock(dev);
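/*
 * Diff the freshly cached address list (src) against the list currently
 * programmed in hardware (dst), marking entries to remove, keep or add.
 */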
684 static void update_addr_list_flags(struct mlx4_en_priv *priv,
685 struct list_head *dst,
686 struct list_head *src)
688 struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
691 /* Find all the entries that should be removed from dst;
692 * these are the entries that are not found in src
694 list_for_each_entry(dst_tmp, dst, list) {
696 list_for_each_entry(src_tmp, src, list) {
697 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
703 dst_tmp->action = MLX4_ADDR_LIST_REM;
706 /* Add entries that exist in src but not in dst,
707 * and mark them as needing to be added
709 list_for_each_entry(src_tmp, src, list) {
711 list_for_each_entry(dst_tmp, dst, list) {
712 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
713 dst_tmp->action = MLX4_ADDR_LIST_NONE;
719 new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
722 en_err(priv, "Failed to allocate current multicast list\n");
725 memcpy(new_mc, src_tmp,
726 sizeof(struct mlx4_en_addr_list));
727 new_mc->action = MLX4_ADDR_LIST_ADD;
728 list_add_tail(&new_mc->list, dst);
733 static void mlx4_en_set_rx_mode(struct net_device *dev)
735 struct mlx4_en_priv *priv = netdev_priv(dev);
740 queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
743 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
744 struct mlx4_en_dev *mdev)
748 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
749 priv->flags |= MLX4_EN_FLAG_PROMISC;
751 /* Enable promiscuous mode */
752 switch (mdev->dev->caps.steering_mode) {
753 case MLX4_STEERING_MODE_DEVICE_MANAGED:
754 err = mlx4_flow_steer_promisc_add(mdev->dev,
757 MLX4_FS_ALL_DEFAULT);
759 en_err(priv, "Failed enabling promiscuous mode\n");
760 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
763 case MLX4_STEERING_MODE_B0:
764 err = mlx4_unicast_promisc_add(mdev->dev,
768 en_err(priv, "Failed enabling unicast promiscuous mode\n");
770 /* Add the default qp number as multicast
773 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
774 err = mlx4_multicast_promisc_add(mdev->dev,
778 en_err(priv, "Failed enabling multicast promiscuous mode\n");
779 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
783 case MLX4_STEERING_MODE_A0:
784 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
789 en_err(priv, "Failed enabling promiscuous mode\n");
793 /* Disable port multicast filter (unconditionally) */
794 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
795 0, MLX4_MCAST_DISABLE);
797 en_err(priv, "Failed disabling multicast filter\n");
801 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
802 struct mlx4_en_dev *mdev)
806 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
808 /* Disable promiscuous mode */
809 switch (mdev->dev->caps.steering_mode) {
810 case MLX4_STEERING_MODE_DEVICE_MANAGED:
811 err = mlx4_flow_steer_promisc_remove(mdev->dev,
813 MLX4_FS_ALL_DEFAULT);
815 en_err(priv, "Failed disabling promiscuous mode\n");
816 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
819 case MLX4_STEERING_MODE_B0:
820 err = mlx4_unicast_promisc_remove(mdev->dev,
824 en_err(priv, "Failed disabling unicast promiscuous mode\n");
825 /* Disable Multicast promisc */
826 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
827 err = mlx4_multicast_promisc_remove(mdev->dev,
831 en_err(priv, "Failed disabling multicast promiscuous mode\n");
832 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
836 case MLX4_STEERING_MODE_A0:
837 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
841 en_err(priv, "Failed disabling promiscuous mode\n");
846 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
847 struct net_device *dev,
848 struct mlx4_en_dev *mdev)
850 struct mlx4_en_addr_list *addr_list, *tmp;
851 u8 mc_list[16] = {0};
856 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
857 if (dev->if_flags & IFF_ALLMULTI) {
858 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
859 0, MLX4_MCAST_DISABLE);
861 en_err(priv, "Failed disabling multicast filter\n");
863 /* Add the default qp number as multicast promisc */
864 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
865 switch (mdev->dev->caps.steering_mode) {
866 case MLX4_STEERING_MODE_DEVICE_MANAGED:
867 err = mlx4_flow_steer_promisc_add(mdev->dev,
873 case MLX4_STEERING_MODE_B0:
874 err = mlx4_multicast_promisc_add(mdev->dev,
879 case MLX4_STEERING_MODE_A0:
883 en_err(priv, "Failed entering multicast promisc mode\n");
884 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
887 /* Disable Multicast promisc */
888 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
889 switch (mdev->dev->caps.steering_mode) {
890 case MLX4_STEERING_MODE_DEVICE_MANAGED:
891 err = mlx4_flow_steer_promisc_remove(mdev->dev,
896 case MLX4_STEERING_MODE_B0:
897 err = mlx4_multicast_promisc_remove(mdev->dev,
902 case MLX4_STEERING_MODE_A0:
906 en_err(priv, "Failed disabling multicast promiscuous mode\n");
907 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
910 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
911 0, MLX4_MCAST_DISABLE);
913 en_err(priv, "Failed disabling multicast filter\n");
915 /* Flush mcast filter and init it with broadcast address */
916 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
917 1, MLX4_MCAST_CONFIG);
919 /* Update multicast list - we cache all addresses so they won't
920 * change while HW is updated holding the command semaphore */
921 mlx4_en_cache_mclist(dev);
922 list_for_each_entry(addr_list, &priv->mc_list, list) {
923 mcast_addr = mlx4_mac_to_u64(addr_list->addr);
924 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
925 mcast_addr, 0, MLX4_MCAST_CONFIG);
927 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
928 0, MLX4_MCAST_ENABLE);
930 en_err(priv, "Failed enabling multicast filter\n");
932 update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);
934 list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
935 if (addr_list->action == MLX4_ADDR_LIST_REM) {
936 /* detach this address and delete from list */
937 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
938 mc_list[5] = priv->port;
939 err = mlx4_multicast_detach(mdev->dev,
940 &priv->rss_map.indir_qp,
945 en_err(priv, "Failed to detach multicast address\n");
947 if (addr_list->tunnel_reg_id) {
948 err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
950 en_err(priv, "Failed to detach multicast address\n");
953 /* remove from list */
954 list_del(&addr_list->list);
956 } else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
957 /* attach the address */
958 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
959 /* needed for B0 steering support */
960 mc_list[5] = priv->port;
961 err = mlx4_multicast_attach(mdev->dev,
962 &priv->rss_map.indir_qp,
968 en_err(priv, "Failed to attach multicast address\n");
970 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
971 &addr_list->tunnel_reg_id);
973 en_err(priv, "Failed to attach multicast address\n");
979 static void mlx4_en_do_unicast(struct mlx4_en_priv *priv,
980 struct net_device *dev,
981 struct mlx4_en_dev *mdev)
983 struct mlx4_en_addr_list *addr_list, *tmp;
986 /* Update unicast list */
987 mlx4_en_cache_uclist(dev);
989 update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);
991 list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
992 if (addr_list->action == MLX4_ADDR_LIST_REM) {
993 mlx4_en_uc_steer_release(priv, addr_list->addr,
994 priv->rss_map.indir_qp.qpn,
996 /* remove from list */
997 list_del(&addr_list->list);
999 } else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
1000 err = mlx4_en_uc_steer_add(priv, addr_list->addr,
1001 &priv->rss_map.indir_qp.qpn,
1002 &addr_list->reg_id);
1004 en_err(priv, "Failed to add unicast address\n");
1009 static void mlx4_en_do_set_rx_mode(struct work_struct *work)
1011 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1013 struct mlx4_en_dev *mdev = priv->mdev;
1014 struct net_device *dev = priv->dev;
1016 mutex_lock(&mdev->state_lock);
1017 if (!mdev->device_up) {
1018 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
1021 if (!priv->port_up) {
1022 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
1025 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
1026 if (priv->port_state.link_state) {
1027 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
1028 /* update netif baudrate */
1029 priv->dev->if_baudrate =
1030 IF_Mbps(priv->port_state.link_speed);
1031 /* Important note: the following call for if_link_state_change
1032 * is needed for interface up scenario (start port, link state
1034 if_link_state_change(priv->dev, LINK_STATE_UP);
1035 en_dbg(HW, priv, "Link Up\n");
1039 /* Set unicast rules */
1040 mlx4_en_do_unicast(priv, dev, mdev);
1042 /* Promiscuous mode: disable all filters */
1043 if ((dev->if_flags & IFF_PROMISC) ||
1044 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
1045 mlx4_en_set_promisc_mode(priv, mdev);
1046 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1047 /* Not in promiscuous mode */
1048 mlx4_en_clear_promisc_mode(priv, mdev);
1051 /* Set multicast rules */
1052 mlx4_en_do_multicast(priv, dev, mdev);
1054 mutex_unlock(&mdev->state_lock);
1057 static void mlx4_en_watchdog_timeout(void *arg)
1059 struct mlx4_en_priv *priv = arg;
1060 struct mlx4_en_dev *mdev = priv->mdev;
1062 en_dbg(DRV, priv, "Scheduling watchdog\n");
1063 queue_work(mdev->workqueue, &priv->watchdog_task);
1065 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
1066 mlx4_en_watchdog_timeout, priv);
1071 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1073 struct mlx4_en_cq *cq;
1076 /* If we haven't received a specific coalescing setting
1077 * (module param), we set the moderation parameters as follows:
1078 * - moder_cnt is set to the number of mtu sized packets to
1079 * satisfy our coalescing target.
1080 * - moder_time is set to a fixed value.
1082 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1083 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1084 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1085 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1086 en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
1087 "rx_frames:%d rx_usecs:%d\n",
1088 (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);
1090 /* Setup cq moderation params */
1091 for (i = 0; i < priv->rx_ring_num; i++) {
1092 cq = priv->rx_cq[i];
1093 cq->moder_cnt = priv->rx_frames;
1094 cq->moder_time = priv->rx_usecs;
1095 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1096 priv->last_moder_packets[i] = 0;
1097 priv->last_moder_bytes[i] = 0;
1100 for (i = 0; i < priv->tx_ring_num; i++) {
1101 cq = priv->tx_cq[i];
1102 cq->moder_cnt = priv->tx_frames;
1103 cq->moder_time = priv->tx_usecs;
1106 /* Reset auto-moderation params */
1107 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1108 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1109 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1110 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1111 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1112 priv->adaptive_rx_coal = 1;
1113 priv->last_moder_jiffies = 0;
1114 priv->last_moder_tx_packets = 0;
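/*
 * Adaptive RX interrupt moderation: once per sample interval compute the
 * per-ring packet rate and, for heavy traffic, linearly interpolate the CQ
 * moderation time between rx_usecs_low and rx_usecs_high, roughly:
 *   moder_time = rx_usecs_low + (rate - pkt_rate_low) *
 *       (rx_usecs_high - rx_usecs_low) / (pkt_rate_high - pkt_rate_low)
 */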
1117 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1119 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1120 struct mlx4_en_cq *cq;
1121 unsigned long packets;
1123 unsigned long avg_pkt_size;
1124 unsigned long rx_packets;
1125 unsigned long rx_bytes;
1126 unsigned long rx_pkt_diff;
1130 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1133 for (ring = 0; ring < priv->rx_ring_num; ring++) {
1134 spin_lock(&priv->stats_lock);
1135 rx_packets = priv->rx_ring[ring]->packets;
1136 rx_bytes = priv->rx_ring[ring]->bytes;
1137 spin_unlock(&priv->stats_lock);
1139 rx_pkt_diff = ((unsigned long) (rx_packets -
1140 priv->last_moder_packets[ring]));
1141 packets = rx_pkt_diff;
1142 rate = packets * HZ / period;
1143 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1144 priv->last_moder_bytes[ring])) / packets : 0;
1146 /* Apply auto-moderation only when the packet rate
1147 * exceeds a rate at which it matters */
1148 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1149 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1150 if (rate < priv->pkt_rate_low)
1151 moder_time = priv->rx_usecs_low;
1152 else if (rate > priv->pkt_rate_high)
1153 moder_time = priv->rx_usecs_high;
1155 moder_time = (rate - priv->pkt_rate_low) *
1156 (priv->rx_usecs_high - priv->rx_usecs_low) /
1157 (priv->pkt_rate_high - priv->pkt_rate_low) +
1160 moder_time = priv->rx_usecs_low;
1163 if (moder_time != priv->last_moder_time[ring]) {
1164 priv->last_moder_time[ring] = moder_time;
1165 cq = priv->rx_cq[ring];
1166 cq->moder_time = moder_time;
1167 cq->moder_cnt = priv->rx_frames;
1168 err = mlx4_en_set_cq_moder(priv, cq);
1170 en_err(priv, "Failed modifying moderation for cq:%d\n",
1173 priv->last_moder_packets[ring] = rx_packets;
1174 priv->last_moder_bytes[ring] = rx_bytes;
1177 priv->last_moder_jiffies = jiffies;
1180 static void mlx4_en_do_get_stats(struct work_struct *work)
1182 struct delayed_work *delay = to_delayed_work(work);
1183 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1185 struct mlx4_en_dev *mdev = priv->mdev;
1188 mutex_lock(&mdev->state_lock);
1189 if (mdev->device_up) {
1190 if (priv->port_up) {
1191 if (mlx4_is_slave(mdev->dev))
1192 err = mlx4_en_get_vport_stats(mdev, priv->port);
1194 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1196 en_dbg(HW, priv, "Could not update stats\n");
1198 mlx4_en_auto_moderation(priv);
1201 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1203 mutex_unlock(&mdev->state_lock);
1206 /* mlx4_en_service_task - Run service task for tasks that need to be done
1209 static void mlx4_en_service_task(struct work_struct *work)
1211 struct delayed_work *delay = to_delayed_work(work);
1212 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1214 struct mlx4_en_dev *mdev = priv->mdev;
1216 mutex_lock(&mdev->state_lock);
1217 if (mdev->device_up) {
1218 queue_delayed_work(mdev->workqueue, &priv->service_task,
1219 SERVICE_TASK_DELAY);
1221 mutex_unlock(&mdev->state_lock);
1224 static void mlx4_en_linkstate(struct work_struct *work)
1226 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1228 struct mlx4_en_dev *mdev = priv->mdev;
1229 int linkstate = priv->link_state;
1231 mutex_lock(&mdev->state_lock);
1232 /* If observable port state changed set carrier state and
1233 * report to system log */
1234 if (priv->last_link_state != linkstate) {
1235 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1236 en_info(priv, "Link Down\n");
1237 if_link_state_change(priv->dev, LINK_STATE_DOWN);
1238 /* update netif baudrate */
1239 priv->dev->if_baudrate = 0;
1241 /* make sure the port is up before notifying the OS.
1242 * This is tricky since we get here on INIT_PORT and
1243 * in such a case we can't tell the OS the port is up.
1244 * To solve this there is a call to if_link_state_change
1247 } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){
1248 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
1249 en_info(priv, "Query port failed\n");
1250 priv->dev->if_baudrate =
1251 IF_Mbps(priv->port_state.link_speed);
1252 en_info(priv, "Link Up\n");
1253 if_link_state_change(priv->dev, LINK_STATE_UP);
1256 priv->last_link_state = linkstate;
1257 mutex_unlock(&mdev->state_lock);
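/*
 * Bring the port up: activate RX CQs and rings, obtain the base QP and RSS
 * steering, create the drop QP, activate TX CQs and rings, configure the
 * port and default QP, run INIT_PORT, attach the broadcast group and kick
 * the rx_mode task before marking the interface running.
 */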
1261 int mlx4_en_start_port(struct net_device *dev)
1263 struct mlx4_en_priv *priv = netdev_priv(dev);
1264 struct mlx4_en_dev *mdev = priv->mdev;
1265 struct mlx4_en_cq *cq;
1266 struct mlx4_en_tx_ring *tx_ring;
1272 u8 mc_list[16] = {0};
1275 if (priv->port_up) {
1276 en_dbg(DRV, priv, "start port called while port already up\n");
1280 INIT_LIST_HEAD(&priv->mc_list);
1281 INIT_LIST_HEAD(&priv->uc_list);
1282 INIT_LIST_HEAD(&priv->curr_mc_list);
1283 INIT_LIST_HEAD(&priv->curr_uc_list);
1284 INIT_LIST_HEAD(&priv->ethtool_list);
1286 /* Calculate Rx buf size */
1287 dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
1288 mlx4_en_calc_rx_buf(dev);
1289 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);
1291 /* Configure rx cq's and rings */
1292 err = mlx4_en_activate_rx_rings(priv);
1294 en_err(priv, "Failed to activate RX rings\n");
1297 for (i = 0; i < priv->rx_ring_num; i++) {
1298 cq = priv->rx_cq[i];
1300 mlx4_en_cq_init_lock(cq);
1301 err = mlx4_en_activate_cq(priv, cq, i);
1303 en_err(priv, "Failed activating Rx CQ\n");
1306 for (j = 0; j < cq->size; j++)
1307 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1308 err = mlx4_en_set_cq_moder(priv, cq);
1310 en_err(priv, "Failed setting cq moderation parameters");
1311 mlx4_en_deactivate_cq(priv, cq);
1314 mlx4_en_arm_cq(priv, cq);
1315 priv->rx_ring[i]->cqn = cq->mcq.cqn;
1320 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1321 err = mlx4_en_get_qp(priv);
1323 en_err(priv, "Failed getting eth qp\n");
1326 mdev->mac_removed[priv->port] = 0;
1328 priv->counter_index =
1329 mlx4_get_default_counter_index(mdev->dev, priv->port);
1331 err = mlx4_en_config_rss_steer(priv);
1333 en_err(priv, "Failed configuring rss steering\n");
1337 err = mlx4_en_create_drop_qp(priv);
1341 /* Configure tx cq's and rings */
1342 for (i = 0; i < priv->tx_ring_num; i++) {
1344 cq = priv->tx_cq[i];
1345 err = mlx4_en_activate_cq(priv, cq, i);
1347 en_err(priv, "Failed activating Tx CQ\n");
1350 err = mlx4_en_set_cq_moder(priv, cq);
1352 en_err(priv, "Failed setting cq moderation parameters");
1353 mlx4_en_deactivate_cq(priv, cq);
1356 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1357 cq->buf->wqe_index = cpu_to_be16(0xffff);
1359 /* Configure ring */
1360 tx_ring = priv->tx_ring[i];
1362 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1363 i / priv->num_tx_rings_p_up);
1365 en_err(priv, "Failed activating Tx ring %d\n", i);
1366 mlx4_en_deactivate_cq(priv, cq);
1370 /* Arm CQ for TX completions */
1371 mlx4_en_arm_cq(priv, cq);
1373 /* Set initial ownership of all Tx TXBBs to SW (1) */
1374 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1375 *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
1379 /* Configure port */
1380 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1382 priv->prof->tx_pause,
1384 priv->prof->rx_pause,
1385 priv->prof->rx_ppp);
1387 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1391 /* Set default qp number */
1392 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1394 en_err(priv, "Failed setting default qp numbers\n");
1399 en_dbg(HW, priv, "Initializing port\n");
1400 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1402 en_err(priv, "Failed Initializing port\n");
1406 /* Attach rx QP to broadcast address */
1407 memset(&mc_list[10], 0xff, ETH_ALEN);
1408 mc_list[5] = priv->port; /* needed for B0 steering support */
1409 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1410 priv->port, 0, MLX4_PROT_ETH,
1411 &priv->broadcast_id))
1412 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1414 /* Must redo promiscuous mode setup. */
1415 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1417 /* Schedule multicast task to populate multicast list */
1418 queue_work(mdev->workqueue, &priv->rx_mode_task);
1420 priv->port_up = true;
1422 /* Enable the queues. */
1423 dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
1424 dev->if_drv_flags |= IFF_DRV_RUNNING;
1425 #ifdef CONFIG_DEBUG_FS
1426 mlx4_en_create_debug_files(priv);
1428 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
1429 mlx4_en_watchdog_timeout, priv);
1435 while (tx_index--) {
1436 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1437 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
1439 mlx4_en_destroy_drop_qp(priv);
1441 mlx4_en_release_rss_steer(priv);
1443 mlx4_en_put_qp(priv);
1446 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1447 for (i = 0; i < priv->rx_ring_num; i++)
1448 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1450 return err; /* need to close devices */
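/*
 * Tear the port down in reverse order: close the port, drop promiscuous
 * state, detach unicast/multicast steering entries, flush the multicast
 * filter, destroy the drop QP, deactivate TX and RX rings/CQs, release RSS
 * steering and the base QP, and stop the watchdog timer.
 */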
1454 void mlx4_en_stop_port(struct net_device *dev)
1456 struct mlx4_en_priv *priv = netdev_priv(dev);
1457 struct mlx4_en_dev *mdev = priv->mdev;
1458 struct mlx4_en_addr_list *addr_list, *tmp;
1460 u8 mc_list[16] = {0};
1462 if (!priv->port_up) {
1463 en_dbg(DRV, priv, "stop port called while port already down\n");
1467 #ifdef CONFIG_DEBUG_FS
1468 mlx4_en_delete_debug_files(priv);
1472 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1474 /* Set port as not active */
1475 priv->port_up = false;
1476 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
1478 /* Promiscuous mode */
1479 if (mdev->dev->caps.steering_mode ==
1480 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1481 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1482 MLX4_EN_FLAG_MC_PROMISC);
1483 mlx4_flow_steer_promisc_remove(mdev->dev,
1485 MLX4_FS_ALL_DEFAULT);
1486 mlx4_flow_steer_promisc_remove(mdev->dev,
1488 MLX4_FS_MC_DEFAULT);
1489 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1490 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1492 /* Disable promiscuous mode */
1493 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1496 /* Disable Multicast promisc */
1497 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1498 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1500 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1504 /* Detach All unicasts */
1505 list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
1506 mlx4_en_uc_steer_release(priv, addr_list->addr,
1507 priv->rss_map.indir_qp.qpn,
1510 mlx4_en_clear_uclist(dev);
1511 list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
1512 list_del(&addr_list->list);
1516 /* Detach All multicasts */
1517 memset(&mc_list[10], 0xff, ETH_ALEN);
1518 mc_list[5] = priv->port; /* needed for B0 steering support */
1519 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1520 MLX4_PROT_ETH, priv->broadcast_id);
1521 list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
1522 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
1523 mc_list[5] = priv->port;
1524 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1525 mc_list, MLX4_PROT_ETH, addr_list->reg_id);
1527 mlx4_en_clear_mclist(dev);
1528 list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
1529 list_del(&addr_list->list);
1533 /* Flush multicast filter */
1534 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1535 mlx4_en_destroy_drop_qp(priv);
1538 for (i = 0; i < priv->tx_ring_num; i++) {
1539 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1540 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
1544 for (i = 0; i < priv->tx_ring_num; i++)
1545 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1548 mlx4_en_release_rss_steer(priv);
1550 /* Unregister Mac address for the port */
1551 mlx4_en_put_qp(priv);
1552 mdev->mac_removed[priv->port] = 1;
1555 for (i = 0; i < priv->rx_ring_num; i++) {
1556 struct mlx4_en_cq *cq = priv->rx_cq[i];
1557 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1558 mlx4_en_deactivate_cq(priv, cq);
1561 callout_stop(&priv->watchdog_timer);
1563 dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
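/*
 * Watchdog work: if any TX ring's watchdog time has expired while the port
 * is up, count a tx_timeout and restart the port (stop followed by start)
 * under the state lock.
 */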
1566 static void mlx4_en_restart(struct work_struct *work)
1568 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1570 struct mlx4_en_dev *mdev = priv->mdev;
1571 struct net_device *dev = priv->dev;
1572 struct mlx4_en_tx_ring *ring;
1576 if (priv->blocked == 0 || priv->port_up == 0)
1578 for (i = 0; i < priv->tx_ring_num; i++) {
1581 ring = priv->tx_ring[i];
1582 watchdog_time = READ_ONCE(ring->watchdog_time);
1583 if (watchdog_time != 0 &&
1584 time_after(ticks, ring->watchdog_time))
1590 priv->port_stats.tx_timeout++;
1591 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1593 mutex_lock(&mdev->state_lock);
1594 if (priv->port_up) {
1595 mlx4_en_stop_port(dev);
1596 //for (i = 0; i < priv->tx_ring_num; i++)
1597 // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
1598 if (mlx4_en_start_port(dev))
1599 en_err(priv, "Failed restarting port %d\n", priv->port);
1601 mutex_unlock(&mdev->state_lock);
1604 static void mlx4_en_clear_stats(struct net_device *dev)
1606 struct mlx4_en_priv *priv = netdev_priv(dev);
1607 struct mlx4_en_dev *mdev = priv->mdev;
1610 if (!mlx4_is_slave(mdev->dev))
1611 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1612 en_dbg(HW, priv, "Failed dumping statistics\n");
1614 memset(&priv->pstats, 0, sizeof(priv->pstats));
1615 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1616 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1617 memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));
1619 for (i = 0; i < priv->tx_ring_num; i++) {
1620 priv->tx_ring[i]->bytes = 0;
1621 priv->tx_ring[i]->packets = 0;
1622 priv->tx_ring[i]->tx_csum = 0;
1623 priv->tx_ring[i]->oversized_packets = 0;
1625 for (i = 0; i < priv->rx_ring_num; i++) {
1626 priv->rx_ring[i]->bytes = 0;
1627 priv->rx_ring[i]->packets = 0;
1628 priv->rx_ring[i]->csum_ok = 0;
1629 priv->rx_ring[i]->csum_none = 0;
1633 static void mlx4_en_open(void* arg)
1636 struct mlx4_en_priv *priv;
1637 struct mlx4_en_dev *mdev;
1638 struct net_device *dev;
1646 mutex_lock(&mdev->state_lock);
1648 if (!mdev->device_up) {
1649 en_err(priv, "Cannot open - device down/disabled\n");
1653 /* Reset HW statistics and SW counters */
1654 mlx4_en_clear_stats(dev);
1656 err = mlx4_en_start_port(dev);
1658 en_err(priv, "Failed starting port:%d\n", priv->port);
1661 mutex_unlock(&mdev->state_lock);
1665 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1669 #ifdef CONFIG_RFS_ACCEL
1670 if (priv->dev->rx_cpu_rmap) {
1671 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1672 priv->dev->rx_cpu_rmap = NULL;
1676 for (i = 0; i < priv->tx_ring_num; i++) {
1677 if (priv->tx_ring && priv->tx_ring[i])
1678 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1679 if (priv->tx_cq && priv->tx_cq[i])
1680 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1683 for (i = 0; i < priv->rx_ring_num; i++) {
1684 if (priv->rx_ring[i])
1685 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1686 priv->prof->rx_ring_size);
1688 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1691 if (priv->stat_sysctl != NULL)
1692 sysctl_ctx_free(&priv->stat_ctx);
1695 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1697 struct mlx4_en_port_profile *prof = priv->prof;
1701 /* Create rx Rings */
1702 for (i = 0; i < priv->rx_ring_num; i++) {
1703 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1704 prof->rx_ring_size, i, RX, node))
1707 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
1708 prof->rx_ring_size, node))
1712 /* Create tx Rings */
1713 for (i = 0; i < priv->tx_ring_num; i++) {
1714 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1715 prof->tx_ring_size, i, TX, node))
1718 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
1719 prof->tx_ring_size, TXBB_SIZE, node, i))
1723 #ifdef CONFIG_RFS_ACCEL
1724 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
1725 if (!priv->dev->rx_cpu_rmap)
1728 /* Re-create stat sysctls in case the number of rings changed. */
1729 mlx4_en_sysctl_stat(priv);
1733 en_err(priv, "Failed to allocate NIC resources\n");
1734 for (i = 0; i < priv->rx_ring_num; i++) {
1735 if (priv->rx_ring[i])
1736 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1737 prof->rx_ring_size);
1739 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1741 for (i = 0; i < priv->tx_ring_num; i++) {
1742 if (priv->tx_ring[i])
1743 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1745 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1747 priv->port_up = false;
1751 struct en_port_attribute {
1752 struct attribute attr;
1753 ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
1754 ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
1757 #define PORT_ATTR_RO(_name) \
1758 struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)
1760 #define EN_PORT_ATTR(_name, _mode, _show, _store) \
1761 struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)
1763 void mlx4_en_destroy_netdev(struct net_device *dev)
1765 struct mlx4_en_priv *priv = netdev_priv(dev);
1766 struct mlx4_en_dev *mdev = priv->mdev;
1768 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
1770 /* don't allow more IOCTLs */
1773 /* XXX wait a bit to allow IOCTL handlers to complete */
1776 if (priv->vlan_attach != NULL)
1777 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
1778 if (priv->vlan_detach != NULL)
1779 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
1781 /* Unregister device - this will close the port if it was up */
1782 if (priv->registered) {
1783 mutex_lock(&mdev->state_lock);
1784 ether_ifdetach(dev);
1785 mutex_unlock(&mdev->state_lock);
1788 mutex_lock(&mdev->state_lock);
1789 mlx4_en_stop_port(dev);
1790 mutex_unlock(&mdev->state_lock);
1792 if (priv->allocated)
1793 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
1795 cancel_delayed_work(&priv->stats_task);
1796 cancel_delayed_work(&priv->service_task);
1797 /* flush any pending task for this netdev */
1798 flush_workqueue(mdev->workqueue);
1799 callout_drain(&priv->watchdog_timer);
1801 /* Detach the netdev so tasks would not attempt to access it */
1802 mutex_lock(&mdev->state_lock);
1803 mdev->pndev[priv->port] = NULL;
1804 mutex_unlock(&mdev->state_lock);
1807 mlx4_en_free_resources(priv);
1809 /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
1810 if (priv->conf_sysctl != NULL)
1811 sysctl_ctx_free(&priv->conf_ctx);
1813 kfree(priv->tx_ring);
1821 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
1823 struct mlx4_en_priv *priv = netdev_priv(dev);
1824 struct mlx4_en_dev *mdev = priv->mdev;
1827 en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
1828 (unsigned)dev->if_mtu, (unsigned)new_mtu);
1830 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
1831 en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
1835 mutex_lock(&mdev->state_lock);
1836 dev->if_mtu = new_mtu;
1837 if (dev->if_drv_flags & IFF_DRV_RUNNING) {
1838 if (!mdev->device_up) {
1839 /* NIC is probably restarting - let watchdog task reset
1841 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
1843 mlx4_en_stop_port(dev);
1844 err = mlx4_en_start_port(dev);
1846 en_err(priv, "Failed restarting port:%d\n",
1848 queue_work(mdev->workqueue, &priv->watchdog_task);
1852 mutex_unlock(&mdev->state_lock);
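/*
 * Translate the queried port state (link state, speed, transceiver type and
 * pause settings) into an ifmedia active word.
 */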
1856 static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
1862 if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
1865 trans_type = priv->port_state.transceiver;
1866 /* XXX I don't know all of the transceiver values. */
1867 switch (priv->port_state.link_speed) {
1869 active |= IFM_100_T;
1872 active |= IFM_1000_T;
1875 if (trans_type > 0 && trans_type <= 0xC)
1876 active |= IFM_10G_SR;
1877 else if (trans_type == 0x80 || trans_type == 0)
1878 active |= IFM_10G_CX4;
1881 active |= IFM_40G_CR4;
1884 if (priv->prof->tx_pause)
1885 active |= IFM_ETH_TXPAUSE;
1886 if (priv->prof->rx_pause)
1887 active |= IFM_ETH_RXPAUSE;
1892 static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
1894 struct mlx4_en_priv *priv;
1896 priv = dev->if_softc;
1897 ifmr->ifm_status = IFM_AVALID;
1898 if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
1899 ifmr->ifm_status |= IFM_ACTIVE;
1900 ifmr->ifm_active = mlx4_en_calc_media(priv);
1905 static int mlx4_en_media_change(struct ifnet *dev)
1907 struct mlx4_en_priv *priv;
1908 struct ifmedia *ifm;
1913 priv = dev->if_softc;
1915 rxpause = txpause = 0;
1918 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1920 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1927 if ((IFM_SUBTYPE(ifm->ifm_media)
1928 == IFM_SUBTYPE(mlx4_en_calc_media(priv)))
1929 && (ifm->ifm_media & IFM_FDX))
1933 printf("%s: Only auto media type\n", if_name(dev));
1936 /* Allow user to set/clear pause */
1937 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
1939 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
1941 if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
1942 priv->prof->tx_pause = txpause;
1943 priv->prof->rx_pause = rxpause;
1944 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
1945 priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
1946 priv->prof->tx_ppp, priv->prof->rx_pause,
1947 priv->prof->rx_ppp);
1952 static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
1954 struct mlx4_en_priv *priv;
1955 struct mlx4_en_dev *mdev;
1959 struct ifrsskey *ifrk;
1961 struct ifrsshash *ifrh;
1966 priv = dev->if_softc;
1968 /* check if detaching */
1969 if (priv == NULL || priv->gone != 0)
1973 ifr = (struct ifreq *) data;
1977 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
1980 if (dev->if_flags & IFF_UP) {
1981 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1982 mutex_lock(&mdev->state_lock);
1983 mlx4_en_start_port(dev);
1984 mutex_unlock(&mdev->state_lock);
1986 mlx4_en_set_rx_mode(dev);
1989 mutex_lock(&mdev->state_lock);
1990 if (dev->if_drv_flags & IFF_DRV_RUNNING) {
1991 mlx4_en_stop_port(dev);
1992 if_link_state_change(dev, LINK_STATE_DOWN);
1994 mutex_unlock(&mdev->state_lock);
1999 mlx4_en_set_rx_mode(dev);
2003 error = ifmedia_ioctl(dev, ifr, &priv->media, command);
2006 mutex_lock(&mdev->state_lock);
2007 mask = ifr->ifr_reqcap ^ dev->if_capenable;
2008 if (mask & IFCAP_TXCSUM) {
2009 dev->if_capenable ^= IFCAP_TXCSUM;
2010 dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2012 if (IFCAP_TSO4 & dev->if_capenable &&
2013 !(IFCAP_TXCSUM & dev->if_capenable)) {
2014 dev->if_capenable &= ~IFCAP_TSO4;
2015 dev->if_hwassist &= ~CSUM_IP_TSO;
2017 "tso4 disabled due to -txcsum.\n");
2020 if (mask & IFCAP_TXCSUM_IPV6) {
2021 dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
2022 dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2024 if (IFCAP_TSO6 & dev->if_capenable &&
2025 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
2026 dev->if_capenable &= ~IFCAP_TSO6;
2027 dev->if_hwassist &= ~CSUM_IP6_TSO;
2029 "tso6 disabled due to -txcsum6.\n");
2032 if (mask & IFCAP_RXCSUM)
2033 dev->if_capenable ^= IFCAP_RXCSUM;
2034 if (mask & IFCAP_RXCSUM_IPV6)
2035 dev->if_capenable ^= IFCAP_RXCSUM_IPV6;
2037 if (mask & IFCAP_TSO4) {
2038 if (!(IFCAP_TSO4 & dev->if_capenable) &&
2039 !(IFCAP_TXCSUM & dev->if_capenable)) {
2040 if_printf(dev, "enable txcsum first.\n");
2044 dev->if_capenable ^= IFCAP_TSO4;
2045 dev->if_hwassist ^= CSUM_IP_TSO;
2047 if (mask & IFCAP_TSO6) {
2048 if (!(IFCAP_TSO6 & dev->if_capenable) &&
2049 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
2050 if_printf(dev, "enable txcsum6 first.\n");
2054 dev->if_capenable ^= IFCAP_TSO6;
2055 dev->if_hwassist ^= CSUM_IP6_TSO;
2057 if (mask & IFCAP_LRO)
2058 dev->if_capenable ^= IFCAP_LRO;
2059 if (mask & IFCAP_VLAN_HWTAGGING)
2060 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2061 if (mask & IFCAP_VLAN_HWFILTER)
2062 dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
2063 if (mask & IFCAP_WOL_MAGIC)
2064 dev->if_capenable ^= IFCAP_WOL_MAGIC;
2065 if (dev->if_drv_flags & IFF_DRV_RUNNING)
2066 mlx4_en_start_port(dev);
2068 mutex_unlock(&mdev->state_lock);
2069 VLAN_CAPABILITIES(dev);
2071 #if __FreeBSD_version >= 1100036
2073 struct ifi2creq i2c;
2075 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2078 if (i2c.len > sizeof(i2c.data)) {
2083 * Note that we ignore i2c.addr here. The driver hardcodes
2084 * the address to 0x50, while the standard expects it to be 0xA0.
2086 error = mlx4_get_module_info(mdev->dev, priv->port,
2087 i2c.offset, i2c.len, i2c.data);
2092 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2097 ifrk = (struct ifrsskey *)data;
2098 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
2099 mutex_lock(&mdev->state_lock);
2100 key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen);
2101 if (ifrk->ifrk_keylen > RSS_KEYLEN)
2104 memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen);
2105 mutex_unlock(&mdev->state_lock);
2108 case SIOCGIFRSSHASH:
2109 mutex_lock(&mdev->state_lock);
2110 rss_mask = mlx4_en_get_rss_mask(priv);
2111 mutex_unlock(&mdev->state_lock);
2112 ifrh = (struct ifrsshash *)data;
2113 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
2114 ifrh->ifrh_types = 0;
2115 if (rss_mask & MLX4_RSS_IPV4)
2116 ifrh->ifrh_types |= RSS_TYPE_IPV4;
2117 if (rss_mask & MLX4_RSS_TCP_IPV4)
2118 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4;
2119 if (rss_mask & MLX4_RSS_IPV6)
2120 ifrh->ifrh_types |= RSS_TYPE_IPV6;
2121 if (rss_mask & MLX4_RSS_TCP_IPV6)
2122 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6;
2123 if (rss_mask & MLX4_RSS_UDP_IPV4)
2124 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4;
2125 if (rss_mask & MLX4_RSS_UDP_IPV6)
2126 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6;
2130 error = ether_ioctl(dev, command, data);
2138 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2139 struct mlx4_en_port_profile *prof)
2141 struct net_device *dev;
2142 struct mlx4_en_priv *priv;
2143 uint8_t dev_addr[ETHER_ADDR_LEN];
2147 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2148 dev = priv->dev = if_alloc(IFT_ETHER);
2150 en_err(priv, "Net device allocation failed\n");
2154 dev->if_softc = priv;
2155 if_initname(dev, "mlxen", (device_get_unit(
2156 mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1);
2157 dev->if_mtu = ETHERMTU;
2158 dev->if_init = mlx4_en_open;
2159 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2160 dev->if_ioctl = mlx4_en_ioctl;
2161 dev->if_transmit = mlx4_en_transmit;
2162 dev->if_qflush = mlx4_en_qflush;
2163 dev->if_snd.ifq_maxlen = prof->tx_ring_size;
2166 * Initialize driver private data
2168 priv->counter_index = 0xff;
2169 spin_lock_init(&priv->stats_lock);
2170 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2171 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2172 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2173 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2174 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2175 callout_init(&priv->watchdog_timer, 1);
2176 #ifdef CONFIG_RFS_ACCEL
2177 INIT_LIST_HEAD(&priv->filters);
2178 spin_lock_init(&priv->filters_lock);
2181 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2184 priv->ddev = &mdev->pdev->dev;
2187 priv->port_up = false;
2188 priv->flags = prof->flags;
2190 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2191 priv->tx_ring_num = prof->tx_ring_num;
2192 priv->tx_ring = kcalloc(MAX_TX_RINGS,
2193 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
2194 if (!priv->tx_ring) {
2198 priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS,
2205 priv->rx_ring_num = prof->rx_ring_num;
2206 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2207 priv->mac_index = -1;
2208 priv->last_ifq_jiffies = 0;
2209 priv->if_counters_rx_errors = 0;
2210 priv->if_counters_rx_no_buffer = 0;
2211 #ifdef CONFIG_MLX4_EN_DCB
2212 if (!mlx4_is_slave(priv->mdev->dev)) {
2213 priv->dcbx_cap = DCB_CAP_DCBX_HOST;
2214 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
2215 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2216 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2218 en_info(priv, "QoS disabled - no HW support\n");
2219 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2224 /* Query for default mac and max mtu */
2225 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2226 priv->mac = mdev->dev->caps.def_mac[priv->port];
2227 if (ILLEGAL_MAC(priv->mac)) {
2228 #if BITS_PER_LONG == 64
2229 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
2230 priv->port, priv->mac);
2231 #elif BITS_PER_LONG == 32
2232 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
2233 priv->port, priv->mac);
2239 mlx4_en_sysctl_conf(priv);
2241 err = mlx4_en_alloc_resources(priv);
2245 /* Allocate page for receive rings */
2246 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2247 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2249 en_err(priv, "Failed to allocate page for rx qps\n");
2252 priv->allocated = 1;
2255 * Set driver features
2257 dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
2258 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
2259 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
2260 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
2261 dev->if_capabilities |= IFCAP_LRO;
2262 dev->if_capabilities |= IFCAP_HWSTATS;
2264 if (mdev->LSO_support)
2265 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;
2267 #if __FreeBSD_version >= 1100000
2268 /* set TSO limits so that we don't have to drop TX packets */
2269 dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
2270 dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
2271 dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
2274 dev->if_capenable = dev->if_capabilities;
2276 dev->if_hwassist = 0;
2277 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
2278 dev->if_hwassist |= CSUM_TSO;
2279 if (dev->if_capenable & IFCAP_TXCSUM)
2280 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2281 if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
2282 dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2285 /* Register for VLAN events */
2286 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
2287 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
2288 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
2289 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
2291 mdev->pndev[priv->port] = dev;
2293 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
2294 mlx4_en_set_default_moderation(priv);
2296 /* Set default MAC */
2297 for (i = 0; i < ETHER_ADDR_LEN; i++)
2298 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));
2301 ether_ifattach(dev, dev_addr);
2302 if_link_state_change(dev, LINK_STATE_DOWN);
2303 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
2304 mlx4_en_media_change, mlx4_en_media_status);
2305 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
2306 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
2307 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
2308 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
2309 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2310 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
2312 NETDUMP_SET(dev, mlx4_en);
2314 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2315 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2317 priv->registered = 1;
2323 priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
	    priv->rx_mb_size,
	    prof->tx_pause, prof->tx_ppp,
	    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		    "for port %d, with error %d\n", priv->port, err);
		goto out;
	}
2335 en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
2342 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2344 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2345 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
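/*
 * Resize the RX and TX rings. The requested sizes are rounded up to a power
 * of two and clamped to the supported range; if the port is up it is stopped,
 * the ring resources are reallocated, and the port is restarted.
 */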
static int mlx4_en_set_ring_size(struct net_device *dev,
    int rx_size, int tx_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
2362 rx_size = roundup_pow_of_two(rx_size);
2363 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
2364 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
2365 tx_size = roundup_pow_of_two(tx_size);
2366 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
2367 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
2369 if (rx_size == (priv->port_up ?
2370 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;
2373 mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}

	mlx4_en_free_resources(priv);
2379 priv->prof->tx_ring_size = tx_size;
2380 priv->prof->rx_ring_size = rx_size;
	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
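/*
 * Sysctl handlers for the conf.rx_size and conf.tx_size nodes. Reads report
 * the profile value; writes funnel into mlx4_en_set_ring_size().
 */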
static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size, error;

	priv = arg1;
	size = priv->prof->rx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, size,
	    priv->prof->tx_ring_size);
	return (error);
}
static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size, error;

	priv = arg1;
	size = priv->prof->tx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
	    size);
	return (error);
}
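/*
 * Identify the plugged transceiver module by reading the first two EEPROM
 * bytes (identifier and revision) and map them to the matching SFF
 * specification and EEPROM length.
 */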
static int mlx4_en_get_module_info(struct net_device *dev,
				   struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;
	u8 data[4];

	/* Read first 2 bytes to get Module & REV ID */
	ret = mlx4_get_module_info(mdev->dev, priv->port,
				   0/*offset*/, 2/*size*/, data);
	if (ret < 2) {
		en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
		return -EIO;
	}
	switch (data[0] /* identifier */) {
	case MLX4_MODULE_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLX4_MODULE_ID_QSFP_PLUS:
		if (data[1] >= 0x3) { /* revision id */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLX4_MODULE_ID_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	case MLX4_MODULE_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n");
		return -EINVAL;
	}

	return 0;
}
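/*
 * Read the module EEPROM in chunks: mlx4_get_module_info() returns the number
 * of bytes it could read per call, so keep advancing the offset until ee->len
 * bytes have been gathered or an error is reported.
 */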
static int mlx4_en_get_module_eeprom(struct net_device *dev,
				     struct ethtool_eeprom *ee,
				     u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		       i, offset, ee->len - i);
		ret = mlx4_get_module_info(mdev->dev, priv->port,
		    offset, ee->len - i, data + i);
		if (!ret) /* Done reading */
			return 0;
		if (ret < 0) {
			en_err(priv,
			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			       i, offset, ee->len - i, ret);
			return 0;
		}
		i += ret;
		offset += ret;
	}
	return 0;
}
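/* Dump the EEPROM contents to the console as a hex table, 16 bytes per row. */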
static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	int i, j = 0, row = 0;
	const int NUM_OF_BYTES = 16;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	while (row < len) {
		printf("0x%04x\t\t", row);
		for (i = 0; i < NUM_OF_BYTES; i++)
			printf("%02x ", data[j++]);
		printf("\n");
		row += NUM_OF_BYTES;
	}
}
2533 /* Read cable EEPROM module information by first inspecting the first
2534 * two bytes to get the length and then read the rest of the information.
2535 * The information is printed to dmesg. */
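/*
 * Example usage (hypothetical interface name; the node lives under the
 * per-interface tree created in mlx4_en_sysctl_conf()):
 *
 *   sysctl hw.mlxen0.conf.eeprom_info=1
 *
 * Writing 1 triggers the dump; any other value is ignored.
 */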
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
	u8 *data;
	int error;
	int result = 0;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct ethtool_modinfo modinfo;
	struct ethtool_eeprom ee;
2547 error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);
	if (result != 1)
		return (0);

	priv = arg1;
	dev = priv->dev;
	data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	error = mlx4_en_get_module_info(dev, &modinfo);
	if (error) {
		en_err(priv,
		    "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
		    -error);
		goto out;
	}

	ee.len = modinfo.eeprom_len;
	ee.offset = 0;
	error = mlx4_en_get_module_eeprom(dev, &ee, data);
	if (error) {
		en_err(priv,
		    "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
		    -error);
		/* Continue printing partial information in case of an error */
	}

	/* EEPROM information will be printed in dmesg */
	mlx4_en_print_eeprom(data, ee.len);
out:
	kfree(data);

	/* Return zero to prevent sysctl failure. */
	return (0);
}
static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int ppp, error;

	priv = arg1;
	ppp = priv->prof->tx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	priv->prof->tx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);

	return (error);
}
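/*
 * RX per-priority pause handler. Toggling between a zero and a non-zero
 * priority mask changes the number of ring resources needed, so in that case
 * the port is stopped, resources are reallocated, and the port is restarted;
 * otherwise only the port pause configuration is refreshed.
 */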
static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	int ppp, error, port_up = 0;

	priv = arg1;
	mdev = priv->mdev;
	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	/* See if we have to change the number of tx queues. */
	if (!ppp != !priv->prof->rx_ppp) {
		mutex_lock(&mdev->state_lock);
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(priv->dev);
		}
		mlx4_en_free_resources(priv);
		priv->prof->rx_ppp = ppp;
		error = -mlx4_en_alloc_resources(priv);
		if (error)
			en_err(priv, "Failed reallocating port resources\n");
		if (error == 0 && port_up) {
			error = -mlx4_en_start_port(priv->dev);
			if (error)
				en_err(priv, "Failed starting port\n");
		}
		mutex_unlock(&mdev->state_lock);
		return (error);
	}
	priv->prof->rx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);

	return (error);
}
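/*
 * Build the per-interface configuration sysctl tree. The root node is created
 * under hw.<ifname> and the knobs below live under its "conf" child, e.g.
 * (hypothetical interface name):
 *
 *   hw.mlxen0.conf.rx_size - receive ring size (read/write)
 *   hw.mlxen0.conf.tx_ppp  - TX per-priority pause mask (read/write)
 */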
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
{
2659 struct net_device *dev;
2660 struct sysctl_ctx_list *ctx;
2661 struct sysctl_oid *node;
2662 struct sysctl_oid_list *node_list;
2663 struct sysctl_oid *coal;
2664 struct sysctl_oid_list *coal_list;
2665 const char *pnameunit;
	dev = priv->dev;
	ctx = &priv->conf_ctx;
2668 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);
2670 sysctl_ctx_init(ctx);
2671 priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
2672 OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet");
2673 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
2674 "conf", CTLFLAG_RD, NULL, "Configuration");
2675 node_list = SYSCTL_CHILDREN(node);
2677 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
2678 CTLFLAG_RW, &priv->msg_enable, 0,
2679 "Driver message enable bitfield");
2680 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
2681 CTLFLAG_RD, &priv->rx_ring_num, 0,
2682 "Number of receive rings");
2683 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
2684 CTLFLAG_RD, &priv->tx_ring_num, 0,
2685 "Number of transmit rings");
2686 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
2687 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2688 mlx4_en_set_rx_ring_size, "I", "Receive ring size");
2689 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
2690 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2691 mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
2692 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
2693 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2694 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
2695 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
2696 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2697 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
2698 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
	    CTLFLAG_RD, &priv->port, 0,
	    "Port Number");
2701 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
	    CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
	    "PCI device name");
2704 /* Add coalescer configuration. */
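	/*
	 * pkt_rate_low/high and rx_usecs_low/high bound the adaptive interrupt
	 * moderation; sample_interval and adaptive_rx_coal control how often
	 * and whether it is re-evaluated.
	 */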
2705 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
2706 "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
2707 coal_list = SYSCTL_CHILDREN(coal);
2708 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
2709 CTLFLAG_RW, &priv->pkt_rate_low, 0,
2710 "Packets per-second for minimum delay");
2711 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
2712 CTLFLAG_RW, &priv->rx_usecs_low, 0,
2713 "Minimum RX delay in micro-seconds");
2714 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
2715 CTLFLAG_RW, &priv->pkt_rate_high, 0,
2716 "Packets per-second for maximum delay");
2717 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
2718 CTLFLAG_RW, &priv->rx_usecs_high, 0,
2719 "Maximum RX delay in micro-seconds");
2720 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
2721 CTLFLAG_RW, &priv->sample_interval, 0,
2722 "adaptive frequency in units of HZ ticks");
2723 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
2724 CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
2725 "Enable adaptive rx coalescing");
2726 /* EEPROM support */
2727 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
2728 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_read_eeprom, "I", "EEPROM information");
}
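/*
 * Build the statistics sysctl tree under hw.<ifname>.stat, including a
 * tx_ring%d / rx_ring%d subtree with per-ring packet and byte counters.
 */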
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
	struct sysctl_ctx_list *ctx;
2735 struct sysctl_oid_list *node_list;
2736 struct sysctl_oid *ring_node;
2737 struct sysctl_oid_list *ring_list;
2738 struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_en_rx_ring *rx_ring;
	char namebuf[128];
	int i;

	ctx = &priv->stat_ctx;
2744 sysctl_ctx_init(ctx);
2745 priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
2746 "stat", CTLFLAG_RD, NULL, "Statistics");
2747 node_list = SYSCTL_CHILDREN(priv->stat_sysctl);
2749 #ifdef MLX4_EN_PERF_STAT
2750 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
2751 &priv->pstats.tx_poll, "TX Poll calls");
2752 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
2753 &priv->pstats.tx_pktsz_avg, "TX average packet size");
2754 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
2755 &priv->pstats.inflight_avg, "TX average packets in-flight");
2756 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
2757 &priv->pstats.tx_coal_avg, "TX average coalesced completions");
2758 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.rx_coal_avg, "RX average coalesced completions");
#endif
2762 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
2763 &priv->port_stats.tso_packets, 0, "TSO packets sent");
2764 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
2765 &priv->port_stats.queue_stopped, 0, "Queue full");
2766 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
2767 &priv->port_stats.wake_queue, 0, "Queue resumed after full");
2768 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
2769 &priv->port_stats.tx_timeout, 0, "Transmit timeouts");
2770 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
2771 &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed");
2772 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
2773 &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf");
2774 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
2775 &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success");
2776 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
2777 &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload");
2778 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
2779 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
2780 "TX checksum offloads");
2781 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
2782 CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
2783 "Oversized chains defragged");
2785 /* Could strdup the names and add in a loop. This is simpler. */
2786 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
2787 &priv->pkstats.rx_bytes, 0, "RX Bytes");
2788 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
2789 &priv->pkstats.rx_packets, 0, "RX packets");
2790 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
2791 &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets");
2792 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
2793 &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets");
2794 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
2795 &priv->pkstats.rx_errors, 0, "RX Errors");
2796 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
2797 &priv->pkstats.rx_dropped, 0, "RX Dropped");
2798 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
2799 &priv->pkstats.rx_length_errors, 0, "RX Length Errors");
2800 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
2801 &priv->pkstats.rx_over_errors, 0, "RX Over Errors");
2802 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
2803 &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
2804 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
2805 &priv->pkstats.rx_jabbers, 0, "RX Jabbers");
2807 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
	    &priv->pkstats.rx_in_range_length_error, 0, "RX In Range Length Error");
2809 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
2810 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
2811 "RX Out Range Length Error");
2812 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
2813 &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Lt 64 Bytes Packets");
2814 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
2815 &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets");
2816 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
2817 &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets");
2818 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
2819 &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets");
2820 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
2821 &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets");
2822 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
2823 &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets");
2824 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
2825 &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets");
2826 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
2827 &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets");
2828 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
2829 &priv->pkstats.rx_gt_1548_bytes_packets, 0,
2830 "RX Greater Then 1548 bytes Packets");
2832 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
2833 &priv->pkstats.tx_packets, 0, "TX packets");
2834 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
2835 &priv->pkstats.tx_bytes, 0, "TX Bytes");
2836 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
2837 &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets");
2838 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
2839 &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets");
2840 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
2841 &priv->pkstats.tx_errors, 0, "TX Errors");
2842 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
2843 &priv->pkstats.tx_dropped, 0, "TX Dropped");
2844 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Than 64 Bytes Packets");
2846 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
2847 &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets");
2848 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
2849 &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets");
2850 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
2851 &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets");
2852 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
2853 &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets");
2854 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
2855 &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets");
2856 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
2857 &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets");
2858 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
2859 &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets");
2860 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
2861 &priv->pkstats.tx_gt_1548_bytes_packets, 0,
2862 "TX Greater Then 1548 Bytes Packets");
2864 for (i = 0; i < priv->tx_ring_num; i++) {
2865 tx_ring = priv->tx_ring[i];
2866 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
2867 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
2868 CTLFLAG_RD, NULL, "TX Ring");
2869 ring_list = SYSCTL_CHILDREN(ring_node);
2870 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
2871 CTLFLAG_RD, &tx_ring->packets, 0, "TX packets");
2872 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
2873 CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes");
2874 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets",
2875 CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets");
2876 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts",
2877 CTLFLAG_RD, &tx_ring->defrag_attempts, 0,
2878 "Oversized chains defragged");
2881 for (i = 0; i < priv->rx_ring_num; i++) {
2882 rx_ring = priv->rx_ring[i];
2883 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
2884 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
2885 CTLFLAG_RD, NULL, "RX Ring");
2886 ring_list = SYSCTL_CHILDREN(ring_node);
2887 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
2888 CTLFLAG_RD, &rx_ring->packets, 0, "RX packets");
2889 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
2890 CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes");
2891 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error",
		    CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors");
	}
}

#ifdef NETDUMP
static int
mlx4_en_netdump_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize)
{
2900 struct mlx4_en_priv *priv;
2902 priv = if_getsoftc(dev);
2903 mutex_lock(&priv->mdev->state_lock);
2904 *nrxr = priv->rx_ring_num;
2905 *ncl = NETDUMP_MAX_IN_FLIGHT;
2906 *clsize = priv->rx_mb_size;
	mutex_unlock(&priv->mdev->state_lock);

	return (0);
}

static void
mlx4_en_netdump_event(struct ifnet *dev, enum netdump_ev event)
{
}

static int
mlx4_en_netdump_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv;
	int err;
2921 priv = if_getsoftc(dev);
2922 if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || !priv->link_state)
		return (ENOENT);

	err = mlx4_en_xmit(priv, 0, &m);
	if (err != 0 && m != NULL)
		m_freem(m);
	return (err);
}

static int
mlx4_en_netdump_poll(struct ifnet *dev, int count)
{
	struct mlx4_en_priv *priv;
2937 priv = if_getsoftc(dev);
	if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state)
		return (ENOENT);

	mlx4_poll_interrupts(priv->mdev->dev);
	return (0);
}
#endif /* NETDUMP */