2 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/etherdevice.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/compat.h>
38 #ifdef CONFIG_NET_RX_BUSY_POLL
39 #include <net/busy_poll.h>
42 #include <linux/list.h>
43 #include <linux/if_ether.h>
45 #include <dev/mlx4/driver.h>
46 #include <dev/mlx4/device.h>
47 #include <dev/mlx4/cmd.h>
48 #include <dev/mlx4/cq.h>
50 #include <sys/eventhandler.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
57 DEBUGNET_DEFINE(mlx4_en);
59 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
60 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
62 #ifdef CONFIG_NET_RX_BUSY_POLL
63 /* must be called with local_bh_disable()d */
64 static int mlx4_en_low_latency_recv(struct napi_struct *napi)
66 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
67 struct net_device *dev = cq->dev;
68 struct mlx4_en_priv *priv = netdev_priv(dev);
69 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
73 return LL_FLUSH_FAILED;
75 if (!mlx4_en_cq_lock_poll(cq))
78 done = mlx4_en_process_rx_cq(dev, cq, 4);
79 #ifdef LL_EXTENDED_STATS
81 rx_ring->cleaned += done;
86 mlx4_en_cq_unlock_poll(cq);
90 #endif /* CONFIG_NET_RX_BUSY_POLL */
92 #ifdef CONFIG_RFS_ACCEL
94 struct mlx4_en_filter {
95 struct list_head next;
96 struct work_struct work;
105 struct mlx4_en_priv *priv;
106 u32 flow_id; /* RFS infrastructure id */
107 int id; /* mlx4_en driver id */
108 u64 reg_id; /* Flow steering API id */
109 u8 activated; /* Used to prevent expiry before filter is attached */
112 struct hlist_node filter_chain;
115 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
117 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
121 return MLX4_NET_TRANS_RULE_ID_UDP;
123 return MLX4_NET_TRANS_RULE_ID_TCP;
125 return MLX4_NET_TRANS_RULE_NUM;
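/*
 * Work handler for an aRFS filter: build an ETH + IPv4 + TCP/UDP flow
 * steering rule that directs the flow to the RSS QP of the requested
 * RX ring, detach any rule previously attached for this filter, attach
 * the new one and finally expire stale filters.
 */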
129 static void mlx4_en_filter_work(struct work_struct *work)
131 struct mlx4_en_filter *filter = container_of(work,
132 struct mlx4_en_filter,
134 struct mlx4_en_priv *priv = filter->priv;
135 struct mlx4_spec_list spec_tcp_udp = {
136 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
139 .dst_port = filter->dst_port,
140 .dst_port_msk = (__force __be16)-1,
141 .src_port = filter->src_port,
142 .src_port_msk = (__force __be16)-1,
146 struct mlx4_spec_list spec_ip = {
147 .id = MLX4_NET_TRANS_RULE_ID_IPV4,
150 .dst_ip = filter->dst_ip,
151 .dst_ip_msk = (__force __be32)-1,
152 .src_ip = filter->src_ip,
153 .src_ip_msk = (__force __be32)-1,
157 struct mlx4_spec_list spec_eth = {
158 .id = MLX4_NET_TRANS_RULE_ID_ETH,
160 struct mlx4_net_trans_rule rule = {
161 .list = LIST_HEAD_INIT(rule.list),
162 .queue_mode = MLX4_NET_TRANS_Q_LIFO,
165 .promisc_mode = MLX4_FS_REGULAR,
167 .priority = MLX4_DOMAIN_RFS,
170 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
172 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
173 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
177 list_add_tail(&spec_eth.list, &rule.list);
178 list_add_tail(&spec_ip.list, &rule.list);
179 list_add_tail(&spec_tcp_udp.list, &rule.list);
181 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
182 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
183 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
185 filter->activated = 0;
187 if (filter->reg_id) {
188 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
189 if (rc && rc != -ENOENT)
190 en_err(priv, "Error detaching flow. rc = %d\n", rc);
193 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
195 en_err(priv, "Error attaching flow. err = %d\n", rc);
198 mlx4_en_filter_rfs_expire(priv);
200 filter->activated = 1;
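/*
 * Fold the flow 4-tuple (addresses and ports) into a single word and
 * hash it to pick the bucket in priv->filter_hash for this filter.
 */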
203 static inline struct hlist_head *
204 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
205 __be16 src_port, __be16 dst_port)
210 l = (__force unsigned long)src_port |
211 ((__force unsigned long)dst_port << 2);
212 l ^= (__force unsigned long)(src_ip ^ dst_ip);
214 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
216 return &priv->filter_hash[bucket_idx];
219 static struct mlx4_en_filter *
220 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
221 __be32 dst_ip, u8 ip_proto, __be16 src_port,
222 __be16 dst_port, u32 flow_id)
224 struct mlx4_en_filter *filter = NULL;
226 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
231 filter->rxq_index = rxq_index;
232 INIT_WORK(&filter->work, mlx4_en_filter_work);
234 filter->src_ip = src_ip;
235 filter->dst_ip = dst_ip;
236 filter->ip_proto = ip_proto;
237 filter->src_port = src_port;
238 filter->dst_port = dst_port;
240 filter->flow_id = flow_id;
242 filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
244 list_add_tail(&filter->next, &priv->filters);
245 hlist_add_head(&filter->filter_chain,
246 filter_hash_bucket(priv, src_ip, dst_ip, src_port,
252 static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
254 struct mlx4_en_priv *priv = filter->priv;
257 list_del(&filter->next);
259 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
260 if (rc && rc != -ENOENT)
261 en_err(priv, "Error detaching flow. rc = %d\n", rc);
266 static inline struct mlx4_en_filter *
267 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
268 u8 ip_proto, __be16 src_port, __be16 dst_port)
270 struct mlx4_en_filter *filter;
271 struct mlx4_en_filter *ret = NULL;
273 hlist_for_each_entry(filter,
274 filter_hash_bucket(priv, src_ip, dst_ip,
277 if (filter->src_ip == src_ip &&
278 filter->dst_ip == dst_ip &&
279 filter->ip_proto == ip_proto &&
280 filter->src_port == src_port &&
281 filter->dst_port == dst_port) {
291 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
292 u16 rxq_index, u32 flow_id)
294 struct mlx4_en_priv *priv = netdev_priv(net_dev);
295 struct mlx4_en_filter *filter;
296 const struct iphdr *ip;
303 int nhoff = skb_network_offset(skb);
306 if (skb->protocol != htons(ETH_P_IP))
307 return -EPROTONOSUPPORT;
309 ip = (const struct iphdr *)(skb->data + nhoff);
310 if (ip_is_fragment(ip))
311 return -EPROTONOSUPPORT;
313 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
314 return -EPROTONOSUPPORT;
315 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
317 ip_proto = ip->protocol;
323 spin_lock_bh(&priv->filters_lock);
324 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
327 if (filter->rxq_index == rxq_index)
330 filter->rxq_index = rxq_index;
332 filter = mlx4_en_filter_alloc(priv, rxq_index,
333 src_ip, dst_ip, ip_proto,
334 src_port, dst_port, flow_id);
341 queue_work(priv->mdev->workqueue, &filter->work);
346 spin_unlock_bh(&priv->filters_lock);
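/*
 * Tear down all aRFS filters: under filters_lock move every filter to a
 * private deletion list and unhash it, then, with the lock dropped,
 * cancel any pending work and free each filter.
 */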
351 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
353 struct mlx4_en_filter *filter, *tmp;
356 spin_lock_bh(&priv->filters_lock);
357 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
358 list_move(&filter->next, &del_list);
359 hlist_del(&filter->filter_chain);
361 spin_unlock_bh(&priv->filters_lock);
363 list_for_each_entry_safe(filter, tmp, &del_list, next) {
364 cancel_work_sync(&filter->work);
365 mlx4_en_filter_free(filter);
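/*
 * Expire aged aRFS filters: scan up to MLX4_EN_FILTER_EXPIRY_QUOTA
 * entries, free those that are activated, have no work pending and
 * that rps_may_expire_flow() reports as no longer needed, then rotate
 * priv->filters so the next scan resumes after the last entry visited.
 */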
369 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
371 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
375 spin_lock_bh(&priv->filters_lock);
376 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
377 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
380 if (filter->activated &&
381 !work_pending(&filter->work) &&
382 rps_may_expire_flow(priv->dev,
383 filter->rxq_index, filter->flow_id,
385 list_move(&filter->next, &del_list);
386 hlist_del(&filter->filter_chain);
388 last_filter = filter;
393 if (last_filter && (&last_filter->next != priv->filters.next))
394 list_move(&priv->filters, &last_filter->next);
396 spin_unlock_bh(&priv->filters_lock);
398 list_for_each_entry_safe(filter, tmp, &del_list, next)
399 mlx4_en_filter_free(filter);
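/*
 * VLAN eventhandler callbacks (registered below for vlan_config and
 * vlan_unconfig): track the VID in priv->active_vlans and, while the
 * device and port are up, reprogram the port VLAN filter and
 * (un)register the VLAN with the firmware.
 */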
403 static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
405 struct mlx4_en_priv *priv = netdev_priv(dev);
406 struct mlx4_en_dev *mdev = priv->mdev;
413 en_dbg(HW, priv, "adding VLAN:%d\n", vid);
415 set_bit(vid, priv->active_vlans);
417 /* Add VID to port VLAN filter */
418 mutex_lock(&mdev->state_lock);
419 if (mdev->device_up && priv->port_up) {
420 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
422 en_err(priv, "Failed configuring VLAN filter\n");
424 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
425 en_dbg(HW, priv, "failed adding vlan %d\n", vid);
426 mutex_unlock(&mdev->state_lock);
430 static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
432 struct mlx4_en_priv *priv = netdev_priv(dev);
433 struct mlx4_en_dev *mdev = priv->mdev;
439 en_dbg(HW, priv, "Killing VID:%d\n", vid);
441 clear_bit(vid, priv->active_vlans);
443 /* Remove VID from port VLAN filter */
444 mutex_lock(&mdev->state_lock);
445 mlx4_unregister_vlan(mdev->dev, priv->port, vid);
447 if (mdev->device_up && priv->port_up) {
448 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
450 en_err(priv, "Failed configuring VLAN filter\n");
452 mutex_unlock(&mdev->state_lock);
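/*
 * Add a VXLAN tunnel steering rule for the given MAC and QP.  This is
 * a no-op unless the device is in VXLAN tunnel offload mode and not in
 * the static DMFS A0 steering mode.
 */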
456 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
457 int qpn, u64 *reg_id)
461 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
462 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
463 return 0; /* do nothing */
465 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
466 MLX4_DOMAIN_NIC, reg_id);
468 en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
471 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id);
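/*
 * Steer a unicast MAC address to the given QP.  B0 steering embeds the
 * MAC in a GID and uses mlx4_unicast_attach(); device-managed flow
 * steering builds an ETH spec rule and attaches it with
 * mlx4_flow_attach(), returning the registration id in *reg_id.
 */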
475 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
476 unsigned char *mac, int *qpn, u64 *reg_id)
478 struct mlx4_en_dev *mdev = priv->mdev;
479 struct mlx4_dev *dev = mdev->dev;
482 switch (dev->caps.steering_mode) {
483 case MLX4_STEERING_MODE_B0: {
488 memcpy(&gid[10], mac, ETH_ALEN);
491 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
494 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
495 struct mlx4_spec_list spec_eth = { {NULL} };
496 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
498 struct mlx4_net_trans_rule rule = {
499 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
502 .promisc_mode = MLX4_FS_REGULAR,
503 .priority = MLX4_DOMAIN_NIC,
506 rule.port = priv->port;
508 INIT_LIST_HEAD(&rule.list);
510 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
511 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
512 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
513 list_add_tail(&spec_eth.list, &rule.list);
515 err = mlx4_flow_attach(dev, &rule, reg_id);
522 en_warn(priv, "Failed Attaching Unicast\n");
527 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
528 unsigned char *mac, int qpn, u64 reg_id)
530 struct mlx4_en_dev *mdev = priv->mdev;
531 struct mlx4_dev *dev = mdev->dev;
533 switch (dev->caps.steering_mode) {
534 case MLX4_STEERING_MODE_B0: {
539 memcpy(&gid[10], mac, ETH_ALEN);
542 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
545 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
546 mlx4_flow_detach(dev, reg_id);
550 en_err(priv, "Invalid steering mode.\n");
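/*
 * Register the interface MAC address and obtain the base RX QP number.
 * With A0 steering the QP is derived from the returned MAC table
 * index; otherwise a QP is reserved explicitly and the MAC is
 * unregistered again if the reservation fails.
 */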
554 static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
556 struct mlx4_en_dev *mdev = priv->mdev;
557 struct mlx4_dev *dev = mdev->dev;
560 int *qpn = &priv->base_qpn;
561 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
563 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
564 IF_LLADDR(priv->dev));
565 index = mlx4_register_mac(dev, priv->port, mac);
568 en_err(priv, "Failed adding MAC: %pM\n",
569 IF_LLADDR(priv->dev));
573 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
574 int base_qpn = mlx4_get_base_qpn(dev, priv->port);
575 *qpn = base_qpn + index;
579 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
580 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
582 en_err(priv, "Failed to reserve qp for mac registration\n");
583 mlx4_unregister_mac(dev, priv->port, mac);
590 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
592 struct mlx4_en_dev *mdev = priv->mdev;
593 struct mlx4_dev *dev = mdev->dev;
594 int qpn = priv->base_qpn;
596 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
597 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
598 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
599 IF_LLADDR(priv->dev));
600 mlx4_unregister_mac(dev, priv->port, mac);
602 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
604 mlx4_qp_release_range(dev, qpn, 1);
605 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
609 static void mlx4_en_clear_uclist(struct net_device *dev)
611 struct mlx4_en_priv *priv = netdev_priv(dev);
612 struct mlx4_en_addr_list *tmp, *uc_to_del;
614 list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
615 list_del(&uc_to_del->list);
620 static u_int mlx4_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
622 struct mlx4_en_priv *priv = arg;
623 struct mlx4_en_addr_list *tmp;
625 if (sdl->sdl_alen != ETHER_ADDR_LEN) /* XXXGL: can that happen? */
627 tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
629 en_err(priv, "Failed to allocate address list\n");
632 memcpy(tmp->addr, LLADDR(sdl), ETH_ALEN);
633 list_add_tail(&tmp->list, &priv->uc_list);
638 static void mlx4_en_cache_uclist(struct net_device *dev)
640 struct mlx4_en_priv *priv = netdev_priv(dev);
642 mlx4_en_clear_uclist(dev);
643 if_foreach_lladdr(dev, mlx4_copy_addr, priv);
646 static void mlx4_en_clear_mclist(struct net_device *dev)
648 struct mlx4_en_priv *priv = netdev_priv(dev);
649 struct mlx4_en_addr_list *tmp, *mc_to_del;
651 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
652 list_del(&mc_to_del->list);
657 static u_int mlx4_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int count)
659 struct mlx4_en_priv *priv = arg;
660 struct mlx4_en_addr_list *tmp;
662 if (sdl->sdl_alen != ETHER_ADDR_LEN) /* XXXGL: can that happen? */
664 tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
666 en_err(priv, "Failed to allocate address list\n");
669 memcpy(tmp->addr, LLADDR(sdl), ETH_ALEN);
670 list_add_tail(&tmp->list, &priv->mc_list);
674 static void mlx4_en_cache_mclist(struct net_device *dev)
676 struct mlx4_en_priv *priv = netdev_priv(dev);
678 mlx4_en_clear_mclist(dev);
679 if_foreach_llmaddr(dev, mlx4_copy_maddr, priv);
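/*
 * Synchronize the programmed address list 'dst' with the freshly
 * cached list 'src': entries missing from src are marked for removal,
 * matching entries are left alone, and entries present only in src are
 * copied onto dst and marked for addition.
 */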
682 static void update_addr_list_flags(struct mlx4_en_priv *priv,
683 struct list_head *dst,
684 struct list_head *src)
686 struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
689 /* Find all the entries that should be removed from dst;
690 * these are the entries that are not found in src */
692 list_for_each_entry(dst_tmp, dst, list) {
694 list_for_each_entry(src_tmp, src, list) {
695 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
701 dst_tmp->action = MLX4_ADDR_LIST_REM;
704 /* Add entries that exist in src but not in dst
705 * and mark them as needing to be added */
707 list_for_each_entry(src_tmp, src, list) {
709 list_for_each_entry(dst_tmp, dst, list) {
710 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
711 dst_tmp->action = MLX4_ADDR_LIST_NONE;
717 new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
720 en_err(priv, "Failed to allocate current multicast list\n");
723 memcpy(new_mc, src_tmp,
724 sizeof(struct mlx4_en_addr_list));
725 new_mc->action = MLX4_ADDR_LIST_ADD;
726 list_add_tail(&new_mc->list, dst);
731 static void mlx4_en_set_rx_mode(struct net_device *dev)
733 struct mlx4_en_priv *priv = netdev_priv(dev);
738 queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
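/*
 * Enter promiscuous mode: set the PROMISC flag, program the device
 * according to its steering mode (DMFS default rules, B0 unicast and
 * multicast promisc entries, or the A0 qpn_calc path) and disable the
 * port multicast filter unconditionally.
 */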
741 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
742 struct mlx4_en_dev *mdev)
746 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
747 priv->flags |= MLX4_EN_FLAG_PROMISC;
749 /* Enable promiscuous mode */
750 switch (mdev->dev->caps.steering_mode) {
751 case MLX4_STEERING_MODE_DEVICE_MANAGED:
752 err = mlx4_flow_steer_promisc_add(mdev->dev,
755 MLX4_FS_ALL_DEFAULT);
757 en_err(priv, "Failed enabling promiscuous mode\n");
758 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
761 case MLX4_STEERING_MODE_B0:
762 err = mlx4_unicast_promisc_add(mdev->dev,
766 en_err(priv, "Failed enabling unicast promiscuous mode\n");
768 /* Add the default qp number as multicast promisc */
771 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
772 err = mlx4_multicast_promisc_add(mdev->dev,
776 en_err(priv, "Failed enabling multicast promiscuous mode\n");
777 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
781 case MLX4_STEERING_MODE_A0:
782 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
787 en_err(priv, "Failed enabling promiscuous mode\n");
791 /* Disable port multicast filter (unconditionally) */
792 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
793 0, MLX4_MCAST_DISABLE);
795 en_err(priv, "Failed disabling multicast filter\n");
799 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
800 struct mlx4_en_dev *mdev)
804 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
806 /* Disable promiscuous mode */
807 switch (mdev->dev->caps.steering_mode) {
808 case MLX4_STEERING_MODE_DEVICE_MANAGED:
809 err = mlx4_flow_steer_promisc_remove(mdev->dev,
811 MLX4_FS_ALL_DEFAULT);
813 en_err(priv, "Failed disabling promiscuous mode\n");
814 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
817 case MLX4_STEERING_MODE_B0:
818 err = mlx4_unicast_promisc_remove(mdev->dev,
822 en_err(priv, "Failed disabling unicast promiscuous mode\n");
823 /* Disable Multicast promisc */
824 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
825 err = mlx4_multicast_promisc_remove(mdev->dev,
829 en_err(priv, "Failed disabling multicast promiscuous mode\n");
830 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
834 case MLX4_STEERING_MODE_A0:
835 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
839 en_err(priv, "Failed disabling promiscuous mode\n");
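/*
 * Reprogram multicast steering and the port multicast filter.  With
 * IFF_ALLMULTI the filter is disabled and multicast promiscuous mode
 * is entered; otherwise multicast promiscuous mode is left, the
 * hardware filter is rebuilt from the cached multicast list, and
 * per-address steering entries (including tunnel steering rules) are
 * attached or detached based on the diff against the programmed list.
 */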
844 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
845 struct net_device *dev,
846 struct mlx4_en_dev *mdev)
848 struct mlx4_en_addr_list *addr_list, *tmp;
849 u8 mc_list[16] = {0};
854 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
855 if (dev->if_flags & IFF_ALLMULTI) {
856 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
857 0, MLX4_MCAST_DISABLE);
859 en_err(priv, "Failed disabling multicast filter\n");
861 /* Add the default qp number as multicast promisc */
862 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
863 switch (mdev->dev->caps.steering_mode) {
864 case MLX4_STEERING_MODE_DEVICE_MANAGED:
865 err = mlx4_flow_steer_promisc_add(mdev->dev,
871 case MLX4_STEERING_MODE_B0:
872 err = mlx4_multicast_promisc_add(mdev->dev,
877 case MLX4_STEERING_MODE_A0:
881 en_err(priv, "Failed entering multicast promisc mode\n");
882 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
885 /* Disable Multicast promisc */
886 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
887 switch (mdev->dev->caps.steering_mode) {
888 case MLX4_STEERING_MODE_DEVICE_MANAGED:
889 err = mlx4_flow_steer_promisc_remove(mdev->dev,
894 case MLX4_STEERING_MODE_B0:
895 err = mlx4_multicast_promisc_remove(mdev->dev,
900 case MLX4_STEERING_MODE_A0:
904 en_err(priv, "Failed disabling multicast promiscuous mode\n");
905 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
908 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
909 0, MLX4_MCAST_DISABLE);
911 en_err(priv, "Failed disabling multicast filter\n");
913 /* Flush mcast filter and init it with broadcast address */
914 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
915 1, MLX4_MCAST_CONFIG);
917 /* Update multicast list - we cache all addresses so they won't
918 * change while HW is updated holding the command semaphore */
919 mlx4_en_cache_mclist(dev);
920 list_for_each_entry(addr_list, &priv->mc_list, list) {
921 mcast_addr = mlx4_mac_to_u64(addr_list->addr);
922 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
923 mcast_addr, 0, MLX4_MCAST_CONFIG);
925 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
926 0, MLX4_MCAST_ENABLE);
928 en_err(priv, "Failed enabling multicast filter\n");
930 update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);
932 list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
933 if (addr_list->action == MLX4_ADDR_LIST_REM) {
934 /* detach this address and delete from list */
935 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
936 mc_list[5] = priv->port;
937 err = mlx4_multicast_detach(mdev->dev,
938 &priv->rss_map.indir_qp,
943 en_err(priv, "Failed to detach multicast address\n");
945 if (addr_list->tunnel_reg_id) {
946 err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
948 en_err(priv, "Failed to detach multicast address\n");
951 /* remove from list */
952 list_del(&addr_list->list);
954 } else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
955 /* attach the address */
956 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
957 /* needed for B0 steering support */
958 mc_list[5] = priv->port;
959 err = mlx4_multicast_attach(mdev->dev,
960 &priv->rss_map.indir_qp,
966 en_err(priv, "Failed to attach multicast address\n");
968 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
969 &addr_list->tunnel_reg_id);
971 en_err(priv, "Failed to attach multicast address\n");
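/*
 * Reprogram unicast steering: refresh the cached unicast address list
 * from the interface, diff it against the currently programmed list
 * and release or add per-address steering entries as needed.
 */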
977 static void mlx4_en_do_unicast(struct mlx4_en_priv *priv,
978 struct net_device *dev,
979 struct mlx4_en_dev *mdev)
981 struct mlx4_en_addr_list *addr_list, *tmp;
984 /* Update unicast list */
985 mlx4_en_cache_uclist(dev);
987 update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);
989 list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
990 if (addr_list->action == MLX4_ADDR_LIST_REM) {
991 mlx4_en_uc_steer_release(priv, addr_list->addr,
992 priv->rss_map.indir_qp.qpn,
994 /* remove from list */
995 list_del(&addr_list->list);
997 } else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
998 err = mlx4_en_uc_steer_add(priv, addr_list->addr,
999 &priv->rss_map.indir_qp.qpn,
1000 &addr_list->reg_id);
1002 en_err(priv, "Failed to add unicast address\n");
1007 static void mlx4_en_do_set_rx_mode(struct work_struct *work)
1009 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1011 struct mlx4_en_dev *mdev = priv->mdev;
1012 struct net_device *dev = priv->dev;
1014 mutex_lock(&mdev->state_lock);
1015 if (!mdev->device_up) {
1016 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
1019 if (!priv->port_up) {
1020 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
1023 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
1024 if (priv->port_state.link_state) {
1025 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
1026 /* update netif baudrate */
1027 priv->dev->if_baudrate =
1028 IF_Mbps(priv->port_state.link_speed);
1029 /* Important note: the following call for if_link_state_change
1030 * is needed for interface up scenario (start port, link state change) */
1032 if_link_state_change(priv->dev, LINK_STATE_UP);
1033 en_dbg(HW, priv, "Link Up\n");
1037 /* Set unicast rules */
1038 mlx4_en_do_unicast(priv, dev, mdev);
1040 /* Promiscuous mode: disable all filters */
1041 if ((dev->if_flags & IFF_PROMISC) ||
1042 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
1043 mlx4_en_set_promisc_mode(priv, mdev);
1044 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1045 /* Not in promiscuous mode */
1046 mlx4_en_clear_promisc_mode(priv, mdev);
1049 /* Set multicast rules */
1050 mlx4_en_do_multicast(priv, dev, mdev);
1052 mutex_unlock(&mdev->state_lock);
1055 static void mlx4_en_watchdog_timeout(void *arg)
1057 struct mlx4_en_priv *priv = arg;
1058 struct mlx4_en_dev *mdev = priv->mdev;
1060 en_dbg(DRV, priv, "Scheduling watchdog\n");
1061 queue_work(mdev->workqueue, &priv->watchdog_task);
1063 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
1064 mlx4_en_watchdog_timeout, priv);
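/*
 * Apply the default interrupt coalescing (moderation) parameters to
 * all RX and TX completion queues and reset the state used by the
 * adaptive moderation algorithm below.
 */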
1069 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1071 struct mlx4_en_cq *cq;
1074 /* If we haven't received a specific coalescing setting
1075 * (module param), we set the moderation parameters as follows:
1076 * - moder_cnt is set to the number of mtu sized packets to
1077 * satisfy our coalescing target.
1078 * - moder_time is set to a fixed value. */
1080 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1081 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1082 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1083 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1084 en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
1085 "rx_frames:%d rx_usecs:%d\n",
1086 (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);
1088 /* Setup cq moderation params */
1089 for (i = 0; i < priv->rx_ring_num; i++) {
1090 cq = priv->rx_cq[i];
1091 cq->moder_cnt = priv->rx_frames;
1092 cq->moder_time = priv->rx_usecs;
1093 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1094 priv->last_moder_packets[i] = 0;
1095 priv->last_moder_bytes[i] = 0;
1098 for (i = 0; i < priv->tx_ring_num; i++) {
1099 cq = priv->tx_cq[i];
1100 cq->moder_cnt = priv->tx_frames;
1101 cq->moder_time = priv->tx_usecs;
1104 /* Reset auto-moderation params */
1105 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1106 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1107 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1108 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1109 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1110 priv->adaptive_rx_coal = 1;
1111 priv->last_moder_jiffies = 0;
1112 priv->last_moder_tx_packets = 0;
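/*
 * Adaptive RX interrupt moderation.  For each RX ring the packet rate
 * seen over the last sampling interval selects the CQ moderation time:
 * below pkt_rate_low use rx_usecs_low, above pkt_rate_high use
 * rx_usecs_high, and in between interpolate linearly:
 *
 *   moder_time = rx_usecs_low +
 *       (rate - pkt_rate_low) * (rx_usecs_high - rx_usecs_low) /
 *       (pkt_rate_high - pkt_rate_low)
 *
 * The CQ is only reprogrammed when the value differs from the one set
 * on the previous pass.
 */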
1115 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1117 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1118 struct mlx4_en_cq *cq;
1119 unsigned long packets;
1121 unsigned long avg_pkt_size;
1122 unsigned long rx_packets;
1123 unsigned long rx_bytes;
1124 unsigned long rx_pkt_diff;
1128 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1131 for (ring = 0; ring < priv->rx_ring_num; ring++) {
1132 spin_lock(&priv->stats_lock);
1133 rx_packets = priv->rx_ring[ring]->packets;
1134 rx_bytes = priv->rx_ring[ring]->bytes;
1135 spin_unlock(&priv->stats_lock);
1137 rx_pkt_diff = ((unsigned long) (rx_packets -
1138 priv->last_moder_packets[ring]));
1139 packets = rx_pkt_diff;
1140 rate = packets * HZ / period;
1141 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1142 priv->last_moder_bytes[ring])) / packets : 0;
1144 /* Apply auto-moderation only when packet rate
1145 * exceeds a rate at which it matters */
1146 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1147 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1148 if (rate < priv->pkt_rate_low)
1149 moder_time = priv->rx_usecs_low;
1150 else if (rate > priv->pkt_rate_high)
1151 moder_time = priv->rx_usecs_high;
1153 moder_time = (rate - priv->pkt_rate_low) *
1154 (priv->rx_usecs_high - priv->rx_usecs_low) /
1155 (priv->pkt_rate_high - priv->pkt_rate_low) +
1158 moder_time = priv->rx_usecs_low;
1161 if (moder_time != priv->last_moder_time[ring]) {
1162 priv->last_moder_time[ring] = moder_time;
1163 cq = priv->rx_cq[ring];
1164 cq->moder_time = moder_time;
1165 cq->moder_cnt = priv->rx_frames;
1166 err = mlx4_en_set_cq_moder(priv, cq);
1168 en_err(priv, "Failed modifying moderation for cq:%d\n",
1171 priv->last_moder_packets[ring] = rx_packets;
1172 priv->last_moder_bytes[ring] = rx_bytes;
1175 priv->last_moder_jiffies = jiffies;
1178 static void mlx4_en_do_get_stats(struct work_struct *work)
1180 struct delayed_work *delay = to_delayed_work(work);
1181 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1183 struct mlx4_en_dev *mdev = priv->mdev;
1186 mutex_lock(&mdev->state_lock);
1187 if (mdev->device_up) {
1188 if (priv->port_up) {
1189 if (mlx4_is_slave(mdev->dev))
1190 err = mlx4_en_get_vport_stats(mdev, priv->port);
1192 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1194 en_dbg(HW, priv, "Could not update stats\n");
1196 mlx4_en_auto_moderation(priv);
1199 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1201 mutex_unlock(&mdev->state_lock);
1204 /* mlx4_en_service_task - Run service task for tasks that need to be done periodically. */
1207 static void mlx4_en_service_task(struct work_struct *work)
1209 struct delayed_work *delay = to_delayed_work(work);
1210 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1212 struct mlx4_en_dev *mdev = priv->mdev;
1214 mutex_lock(&mdev->state_lock);
1215 if (mdev->device_up) {
1216 queue_delayed_work(mdev->workqueue, &priv->service_task,
1217 SERVICE_TASK_DELAY);
1219 mutex_unlock(&mdev->state_lock);
1222 static void mlx4_en_linkstate(struct work_struct *work)
1224 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1226 struct mlx4_en_dev *mdev = priv->mdev;
1227 int linkstate = priv->link_state;
1229 mutex_lock(&mdev->state_lock);
1230 /* If observable port state changed, set carrier state and
1231 * report to system log */
1232 if (priv->last_link_state != linkstate) {
1233 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1234 en_info(priv, "Link Down\n");
1235 if_link_state_change(priv->dev, LINK_STATE_DOWN);
1236 /* update netif baudrate */
1237 priv->dev->if_baudrate = 0;
1239 /* make sure the port is up before notifying the OS.
1240 * This is tricky since we get here on INIT_PORT and
1241 * in such case we can't tell the OS the port is up.
1242 * To solve this there is a call to if_link_state_change in set_rx_mode. */
1245 } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){
1246 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
1247 en_info(priv, "Query port failed\n");
1248 priv->dev->if_baudrate =
1249 IF_Mbps(priv->port_state.link_speed);
1250 en_info(priv, "Link Up\n");
1251 if_link_state_change(priv->dev, LINK_STATE_UP);
1254 priv->last_link_state = linkstate;
1255 mutex_unlock(&mdev->state_lock);
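/*
 * Bring the port up: activate RX CQs and rings, acquire the base QP
 * and RSS steering resources, activate TX CQs and rings, apply the
 * port configuration, attach the broadcast address, mark the interface
 * running and arm the watchdog.  On failure the resources activated so
 * far are unwound in reverse order.
 */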
1259 int mlx4_en_start_port(struct net_device *dev)
1261 struct mlx4_en_priv *priv = netdev_priv(dev);
1262 struct mlx4_en_dev *mdev = priv->mdev;
1263 struct mlx4_en_cq *cq;
1264 struct mlx4_en_tx_ring *tx_ring;
1270 u8 mc_list[16] = {0};
1273 if (priv->port_up) {
1274 en_dbg(DRV, priv, "start port called while port already up\n");
1278 INIT_LIST_HEAD(&priv->mc_list);
1279 INIT_LIST_HEAD(&priv->uc_list);
1280 INIT_LIST_HEAD(&priv->curr_mc_list);
1281 INIT_LIST_HEAD(&priv->curr_uc_list);
1282 INIT_LIST_HEAD(&priv->ethtool_list);
1284 /* Calculate Rx buf size */
1285 dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
1286 mlx4_en_calc_rx_buf(dev);
1287 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);
1289 /* Configure rx cq's and rings */
1290 err = mlx4_en_activate_rx_rings(priv);
1292 en_err(priv, "Failed to activate RX rings\n");
1295 for (i = 0; i < priv->rx_ring_num; i++) {
1296 cq = priv->rx_cq[i];
1298 mlx4_en_cq_init_lock(cq);
1299 err = mlx4_en_activate_cq(priv, cq, i);
1301 en_err(priv, "Failed activating Rx CQ\n");
1304 for (j = 0; j < cq->size; j++)
1305 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1306 err = mlx4_en_set_cq_moder(priv, cq);
1308 en_err(priv, "Failed setting cq moderation parameters");
1309 mlx4_en_deactivate_cq(priv, cq);
1312 mlx4_en_arm_cq(priv, cq);
1313 priv->rx_ring[i]->cqn = cq->mcq.cqn;
1318 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1319 err = mlx4_en_get_qp(priv);
1321 en_err(priv, "Failed getting eth qp\n");
1324 mdev->mac_removed[priv->port] = 0;
1326 priv->counter_index =
1327 mlx4_get_default_counter_index(mdev->dev, priv->port);
1329 err = mlx4_en_config_rss_steer(priv);
1331 en_err(priv, "Failed configuring rss steering\n");
1335 err = mlx4_en_create_drop_qp(priv);
1339 /* Configure tx cq's and rings */
1340 for (i = 0; i < priv->tx_ring_num; i++) {
1342 cq = priv->tx_cq[i];
1343 err = mlx4_en_activate_cq(priv, cq, i);
1345 en_err(priv, "Failed activating Tx CQ\n");
1348 err = mlx4_en_set_cq_moder(priv, cq);
1350 en_err(priv, "Failed setting cq moderation parameters");
1351 mlx4_en_deactivate_cq(priv, cq);
1354 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1355 cq->buf->wqe_index = cpu_to_be16(0xffff);
1357 /* Configure ring */
1358 tx_ring = priv->tx_ring[i];
1360 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1361 i / priv->num_tx_rings_p_up);
1363 en_err(priv, "Failed activating Tx ring %d\n", i);
1364 mlx4_en_deactivate_cq(priv, cq);
1368 /* Arm CQ for TX completions */
1369 mlx4_en_arm_cq(priv, cq);
1371 /* Set initial ownership of all Tx TXBBs to SW (1) */
1372 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1373 *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
1377 /* Configure port */
1378 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1380 priv->prof->tx_pause,
1382 priv->prof->rx_pause,
1383 priv->prof->rx_ppp);
1385 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1389 /* Set default qp number */
1390 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1392 en_err(priv, "Failed setting default qp numbers\n");
1397 en_dbg(HW, priv, "Initializing port\n");
1398 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1400 en_err(priv, "Failed Initializing port\n");
1404 /* Attach rx QP to broadcast address */
1405 memset(&mc_list[10], 0xff, ETH_ALEN);
1406 mc_list[5] = priv->port; /* needed for B0 steering support */
1407 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1408 priv->port, 0, MLX4_PROT_ETH,
1409 &priv->broadcast_id))
1410 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1412 /* Must redo promiscuous mode setup. */
1413 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1415 /* Schedule multicast task to populate multicast list */
1416 queue_work(mdev->workqueue, &priv->rx_mode_task);
1418 priv->port_up = true;
1420 /* Enable the queues. */
1421 dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
1422 dev->if_drv_flags |= IFF_DRV_RUNNING;
1423 #ifdef CONFIG_DEBUG_FS
1424 mlx4_en_create_debug_files(priv);
1426 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
1427 mlx4_en_watchdog_timeout, priv);
1433 while (tx_index--) {
1434 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1435 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
1437 mlx4_en_destroy_drop_qp(priv);
1439 mlx4_en_release_rss_steer(priv);
1441 mlx4_en_put_qp(priv);
1444 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1445 for (i = 0; i < priv->rx_ring_num; i++)
1446 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1448 return err; /* need to close devices */
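/*
 * Bring the port down: close the firmware port, drop promiscuous and
 * multicast-promiscuous state, detach all unicast and multicast
 * steering entries, deactivate TX and RX rings and CQs, release the
 * RSS and QP resources and stop the watchdog callout.
 */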
1452 void mlx4_en_stop_port(struct net_device *dev)
1454 struct mlx4_en_priv *priv = netdev_priv(dev);
1455 struct mlx4_en_dev *mdev = priv->mdev;
1456 struct mlx4_en_addr_list *addr_list, *tmp;
1458 u8 mc_list[16] = {0};
1460 if (!priv->port_up) {
1461 en_dbg(DRV, priv, "stop port called while port already down\n");
1465 #ifdef CONFIG_DEBUG_FS
1466 mlx4_en_delete_debug_files(priv);
1470 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1472 /* Set port as not active */
1473 priv->port_up = false;
1474 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
1476 /* Promiscuous mode */
1477 if (mdev->dev->caps.steering_mode ==
1478 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1479 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1480 MLX4_EN_FLAG_MC_PROMISC);
1481 mlx4_flow_steer_promisc_remove(mdev->dev,
1483 MLX4_FS_ALL_DEFAULT);
1484 mlx4_flow_steer_promisc_remove(mdev->dev,
1486 MLX4_FS_MC_DEFAULT);
1487 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1488 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1490 /* Disable promiscuous mode */
1491 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1494 /* Disable Multicast promisc */
1495 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1496 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1498 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1502 /* Detach All unicasts */
1503 list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
1504 mlx4_en_uc_steer_release(priv, addr_list->addr,
1505 priv->rss_map.indir_qp.qpn,
1508 mlx4_en_clear_uclist(dev);
1509 list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
1510 list_del(&addr_list->list);
1514 /* Detach All multicasts */
1515 memset(&mc_list[10], 0xff, ETH_ALEN);
1516 mc_list[5] = priv->port; /* needed for B0 steering support */
1517 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1518 MLX4_PROT_ETH, priv->broadcast_id);
1519 list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
1520 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
1521 mc_list[5] = priv->port;
1522 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1523 mc_list, MLX4_PROT_ETH, addr_list->reg_id);
1525 mlx4_en_clear_mclist(dev);
1526 list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
1527 list_del(&addr_list->list);
1531 /* Flush multicast filter */
1532 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1533 mlx4_en_destroy_drop_qp(priv);
1536 for (i = 0; i < priv->tx_ring_num; i++) {
1537 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1538 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
1542 for (i = 0; i < priv->tx_ring_num; i++)
1543 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1546 mlx4_en_release_rss_steer(priv);
1548 /* Unregister Mac address for the port */
1549 mlx4_en_put_qp(priv);
1550 mdev->mac_removed[priv->port] = 1;
1553 for (i = 0; i < priv->rx_ring_num; i++) {
1554 struct mlx4_en_cq *cq = priv->rx_cq[i];
1555 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1556 mlx4_en_deactivate_cq(priv, cq);
1559 callout_stop(&priv->watchdog_timer);
1561 dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
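/*
 * Watchdog work handler: if the transmit path is blocked and a TX
 * ring's watchdog timestamp has expired, account a TX timeout and
 * restart the port (stop followed by start) under the state lock.
 */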
1564 static void mlx4_en_restart(struct work_struct *work)
1566 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1568 struct mlx4_en_dev *mdev = priv->mdev;
1569 struct net_device *dev = priv->dev;
1570 struct mlx4_en_tx_ring *ring;
1574 if (priv->blocked == 0 || priv->port_up == 0)
1576 for (i = 0; i < priv->tx_ring_num; i++) {
1579 ring = priv->tx_ring[i];
1580 watchdog_time = READ_ONCE(ring->watchdog_time);
1581 if (watchdog_time != 0 &&
1582 time_after(ticks, ring->watchdog_time))
1588 priv->port_stats.tx_timeout++;
1589 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1591 mutex_lock(&mdev->state_lock);
1592 if (priv->port_up) {
1593 mlx4_en_stop_port(dev);
1594 //for (i = 0; i < priv->tx_ring_num; i++)
1595 // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
1596 if (mlx4_en_start_port(dev))
1597 en_err(priv, "Failed restarting port %d\n", priv->port);
1599 mutex_unlock(&mdev->state_lock);
1602 static void mlx4_en_clear_stats(struct net_device *dev)
1604 struct mlx4_en_priv *priv = netdev_priv(dev);
1605 struct mlx4_en_dev *mdev = priv->mdev;
1608 if (!mlx4_is_slave(mdev->dev))
1609 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1610 en_dbg(HW, priv, "Failed dumping statistics\n");
1612 memset(&priv->pstats, 0, sizeof(priv->pstats));
1613 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1614 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1615 memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));
1617 for (i = 0; i < priv->tx_ring_num; i++) {
1618 priv->tx_ring[i]->bytes = 0;
1619 priv->tx_ring[i]->packets = 0;
1620 priv->tx_ring[i]->tx_csum = 0;
1621 priv->tx_ring[i]->oversized_packets = 0;
1623 for (i = 0; i < priv->rx_ring_num; i++) {
1624 priv->rx_ring[i]->bytes = 0;
1625 priv->rx_ring[i]->packets = 0;
1626 priv->rx_ring[i]->csum_ok = 0;
1627 priv->rx_ring[i]->csum_none = 0;
1631 static void mlx4_en_open(void* arg)
1634 struct mlx4_en_priv *priv;
1635 struct mlx4_en_dev *mdev;
1636 struct net_device *dev;
1644 mutex_lock(&mdev->state_lock);
1646 if (!mdev->device_up) {
1647 en_err(priv, "Cannot open - device down/disabled\n");
1651 /* Reset HW statistics and SW counters */
1652 mlx4_en_clear_stats(dev);
1654 err = mlx4_en_start_port(dev);
1656 en_err(priv, "Failed starting port:%d\n", priv->port);
1659 mutex_unlock(&mdev->state_lock);
1663 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1667 #ifdef CONFIG_RFS_ACCEL
1668 if (priv->dev->rx_cpu_rmap) {
1669 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1670 priv->dev->rx_cpu_rmap = NULL;
1674 for (i = 0; i < priv->tx_ring_num; i++) {
1675 if (priv->tx_ring && priv->tx_ring[i])
1676 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1677 if (priv->tx_cq && priv->tx_cq[i])
1678 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1681 for (i = 0; i < priv->rx_ring_num; i++) {
1682 if (priv->rx_ring[i])
1683 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1684 priv->prof->rx_ring_size);
1686 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1689 if (priv->stat_sysctl != NULL)
1690 sysctl_ctx_free(&priv->stat_ctx);
1693 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1695 struct mlx4_en_port_profile *prof = priv->prof;
1699 /* Create rx Rings */
1700 for (i = 0; i < priv->rx_ring_num; i++) {
1701 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1702 prof->rx_ring_size, i, RX, node))
1705 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
1706 prof->rx_ring_size, node))
1710 /* Create tx Rings */
1711 for (i = 0; i < priv->tx_ring_num; i++) {
1712 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1713 prof->tx_ring_size, i, TX, node))
1716 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
1717 prof->tx_ring_size, TXBB_SIZE, node, i))
1721 #ifdef CONFIG_RFS_ACCEL
1722 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
1723 if (!priv->dev->rx_cpu_rmap)
1726 /* Re-create stat sysctls in case the number of rings changed. */
1727 mlx4_en_sysctl_stat(priv);
1731 en_err(priv, "Failed to allocate NIC resources\n");
1732 for (i = 0; i < priv->rx_ring_num; i++) {
1733 if (priv->rx_ring[i])
1734 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1735 prof->rx_ring_size);
1737 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1739 for (i = 0; i < priv->tx_ring_num; i++) {
1740 if (priv->tx_ring[i])
1741 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1743 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1745 priv->port_up = false;
1749 struct en_port_attribute {
1750 struct attribute attr;
1751 ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
1752 ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
1755 #define PORT_ATTR_RO(_name) \
1756 struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)
1758 #define EN_PORT_ATTR(_name, _mode, _show, _store) \
1759 struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)
1761 void mlx4_en_destroy_netdev(struct net_device *dev)
1763 struct mlx4_en_priv *priv = netdev_priv(dev);
1764 struct mlx4_en_dev *mdev = priv->mdev;
1766 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
1768 /* don't allow more IOCTLs */
1771 /* XXX wait a bit to allow IOCTL handlers to complete */
1774 if (priv->vlan_attach != NULL)
1775 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
1776 if (priv->vlan_detach != NULL)
1777 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
1779 mutex_lock(&mdev->state_lock);
1780 mlx4_en_stop_port(dev);
1781 mutex_unlock(&mdev->state_lock);
1783 /* Unregister device - this will close the port if it was up */
1784 if (priv->registered)
1785 ether_ifdetach(dev);
1787 if (priv->allocated)
1788 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
1790 cancel_delayed_work(&priv->stats_task);
1791 cancel_delayed_work(&priv->service_task);
1792 /* flush any pending task for this netdev */
1793 flush_workqueue(mdev->workqueue);
1794 callout_drain(&priv->watchdog_timer);
1796 /* Detach the netdev so tasks would not attempt to access it */
1797 mutex_lock(&mdev->state_lock);
1798 mdev->pndev[priv->port] = NULL;
1799 mutex_unlock(&mdev->state_lock);
1802 mlx4_en_free_resources(priv);
1804 /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
1805 if (priv->conf_sysctl != NULL)
1806 sysctl_ctx_free(&priv->conf_ctx);
1808 kfree(priv->tx_ring);
1816 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
1818 struct mlx4_en_priv *priv = netdev_priv(dev);
1819 struct mlx4_en_dev *mdev = priv->mdev;
1822 en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
1823 (unsigned)dev->if_mtu, (unsigned)new_mtu);
1825 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
1826 en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
1830 mutex_lock(&mdev->state_lock);
1831 dev->if_mtu = new_mtu;
1832 if (dev->if_drv_flags & IFF_DRV_RUNNING) {
1833 if (!mdev->device_up) {
1834 /* NIC is probably restarting - let watchdog task reset the port */
1836 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
1838 mlx4_en_stop_port(dev);
1839 err = mlx4_en_start_port(dev);
1841 en_err(priv, "Failed restarting port:%d\n",
1843 queue_work(mdev->workqueue, &priv->watchdog_task);
1847 mutex_unlock(&mdev->state_lock);
1851 static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
1857 if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
1860 trans_type = priv->port_state.transceiver;
1861 /* XXX I don't know all of the transceiver values. */
1862 switch (priv->port_state.link_speed) {
1864 active |= IFM_100_T;
1867 active |= IFM_1000_T;
1870 if (trans_type > 0 && trans_type <= 0xC)
1871 active |= IFM_10G_SR;
1872 else if (trans_type == 0x80 || trans_type == 0)
1873 active |= IFM_10G_CX4;
1876 active |= IFM_40G_CR4;
1879 if (priv->prof->tx_pause)
1880 active |= IFM_ETH_TXPAUSE;
1881 if (priv->prof->rx_pause)
1882 active |= IFM_ETH_RXPAUSE;
1887 static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
1889 struct mlx4_en_priv *priv;
1891 priv = dev->if_softc;
1892 ifmr->ifm_status = IFM_AVALID;
1893 if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
1894 ifmr->ifm_status |= IFM_ACTIVE;
1895 ifmr->ifm_active = mlx4_en_calc_media(priv);
1900 static int mlx4_en_media_change(struct ifnet *dev)
1902 struct mlx4_en_priv *priv;
1903 struct ifmedia *ifm;
1908 priv = dev->if_softc;
1910 rxpause = txpause = 0;
1913 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1915 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1922 if ((IFM_SUBTYPE(ifm->ifm_media)
1923 == IFM_SUBTYPE(mlx4_en_calc_media(priv)))
1924 && (ifm->ifm_media & IFM_FDX))
1928 printf("%s: Only auto media type\n", if_name(dev));
1931 /* Allow user to set/clear pause */
1932 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
1934 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
1936 if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
1937 priv->prof->tx_pause = txpause;
1938 priv->prof->rx_pause = rxpause;
1939 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
1940 priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
1941 priv->prof->tx_ppp, priv->prof->rx_pause,
1942 priv->prof->rx_ppp);
1947 static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
1949 struct mlx4_en_priv *priv;
1950 struct mlx4_en_dev *mdev;
1954 struct ifrsskey *ifrk;
1956 struct ifrsshash *ifrh;
1961 priv = dev->if_softc;
1963 /* check if detaching */
1964 if (priv == NULL || priv->gone != 0)
1968 ifr = (struct ifreq *) data;
1972 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
1975 if (dev->if_flags & IFF_UP) {
1976 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1977 mutex_lock(&mdev->state_lock);
1978 mlx4_en_start_port(dev);
1979 mutex_unlock(&mdev->state_lock);
1981 mlx4_en_set_rx_mode(dev);
1984 mutex_lock(&mdev->state_lock);
1985 if (dev->if_drv_flags & IFF_DRV_RUNNING) {
1986 mlx4_en_stop_port(dev);
1987 if_link_state_change(dev, LINK_STATE_DOWN);
1989 mutex_unlock(&mdev->state_lock);
1994 mlx4_en_set_rx_mode(dev);
1998 error = ifmedia_ioctl(dev, ifr, &priv->media, command);
2001 mutex_lock(&mdev->state_lock);
2002 mask = ifr->ifr_reqcap ^ dev->if_capenable;
2003 if (mask & IFCAP_TXCSUM) {
2004 dev->if_capenable ^= IFCAP_TXCSUM;
2005 dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2007 if (IFCAP_TSO4 & dev->if_capenable &&
2008 !(IFCAP_TXCSUM & dev->if_capenable)) {
2009 dev->if_capenable &= ~IFCAP_TSO4;
2010 dev->if_hwassist &= ~CSUM_IP_TSO;
2012 "tso4 disabled due to -txcsum.\n");
2015 if (mask & IFCAP_TXCSUM_IPV6) {
2016 dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
2017 dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2019 if (IFCAP_TSO6 & dev->if_capenable &&
2020 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
2021 dev->if_capenable &= ~IFCAP_TSO6;
2022 dev->if_hwassist &= ~CSUM_IP6_TSO;
2024 "tso6 disabled due to -txcsum6.\n");
2027 if (mask & IFCAP_RXCSUM)
2028 dev->if_capenable ^= IFCAP_RXCSUM;
2029 if (mask & IFCAP_RXCSUM_IPV6)
2030 dev->if_capenable ^= IFCAP_RXCSUM_IPV6;
2032 if (mask & IFCAP_TSO4) {
2033 if (!(IFCAP_TSO4 & dev->if_capenable) &&
2034 !(IFCAP_TXCSUM & dev->if_capenable)) {
2035 if_printf(dev, "enable txcsum first.\n");
2039 dev->if_capenable ^= IFCAP_TSO4;
2040 dev->if_hwassist ^= CSUM_IP_TSO;
2042 if (mask & IFCAP_TSO6) {
2043 if (!(IFCAP_TSO6 & dev->if_capenable) &&
2044 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
2045 if_printf(dev, "enable txcsum6 first.\n");
2049 dev->if_capenable ^= IFCAP_TSO6;
2050 dev->if_hwassist ^= CSUM_IP6_TSO;
2052 if (mask & IFCAP_LRO)
2053 dev->if_capenable ^= IFCAP_LRO;
2054 if (mask & IFCAP_VLAN_HWTAGGING)
2055 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2056 if (mask & IFCAP_VLAN_HWFILTER)
2057 dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
2058 if (mask & IFCAP_WOL_MAGIC)
2059 dev->if_capenable ^= IFCAP_WOL_MAGIC;
2060 if (dev->if_drv_flags & IFF_DRV_RUNNING)
2061 mlx4_en_start_port(dev);
2063 mutex_unlock(&mdev->state_lock);
2064 VLAN_CAPABILITIES(dev);
2066 #if __FreeBSD_version >= 1100036
2068 struct ifi2creq i2c;
2070 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2073 if (i2c.len > sizeof(i2c.data)) {
2078 /* Note that we ignore i2c.addr here. The driver hardcodes
2079 * the address to 0x50, while the standard expects it to be 0xA0. */
2081 error = mlx4_get_module_info(mdev->dev, priv->port,
2082 i2c.offset, i2c.len, i2c.data);
2087 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2092 ifrk = (struct ifrsskey *)data;
2093 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
2094 mutex_lock(&mdev->state_lock);
2095 key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen);
2096 if (ifrk->ifrk_keylen > RSS_KEYLEN)
2099 memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen);
2100 mutex_unlock(&mdev->state_lock);
2103 case SIOCGIFRSSHASH:
2104 mutex_lock(&mdev->state_lock);
2105 rss_mask = mlx4_en_get_rss_mask(priv);
2106 mutex_unlock(&mdev->state_lock);
2107 ifrh = (struct ifrsshash *)data;
2108 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
2109 ifrh->ifrh_types = 0;
2110 if (rss_mask & MLX4_RSS_IPV4)
2111 ifrh->ifrh_types |= RSS_TYPE_IPV4;
2112 if (rss_mask & MLX4_RSS_TCP_IPV4)
2113 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4;
2114 if (rss_mask & MLX4_RSS_IPV6)
2115 ifrh->ifrh_types |= RSS_TYPE_IPV6;
2116 if (rss_mask & MLX4_RSS_TCP_IPV6)
2117 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6;
2118 if (rss_mask & MLX4_RSS_UDP_IPV4)
2119 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4;
2120 if (rss_mask & MLX4_RSS_UDP_IPV6)
2121 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6;
2125 error = ether_ioctl(dev, command, data);
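/*
 * Create and register the network interface for one port: allocate the
 * ifnet and private state, initialize work queues and callouts,
 * allocate ring and CQ resources, advertise interface capabilities,
 * register the VLAN event handlers, attach the Ethernet interface and
 * kick off the statistics and service tasks.
 */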
2133 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2134 struct mlx4_en_port_profile *prof)
2136 struct net_device *dev;
2137 struct mlx4_en_priv *priv;
2138 uint8_t dev_addr[ETHER_ADDR_LEN];
2142 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2143 dev = priv->dev = if_alloc(IFT_ETHER);
2145 en_err(priv, "Net device allocation failed\n");
2149 dev->if_softc = priv;
2150 if_initname(dev, "mlxen", (device_get_unit(
2151 mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1);
2152 dev->if_mtu = ETHERMTU;
2153 dev->if_init = mlx4_en_open;
2154 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2155 dev->if_ioctl = mlx4_en_ioctl;
2156 dev->if_transmit = mlx4_en_transmit;
2157 dev->if_qflush = mlx4_en_qflush;
2158 dev->if_snd.ifq_maxlen = prof->tx_ring_size;
2161 /* Initialize driver private data */
2163 priv->counter_index = 0xff;
2164 spin_lock_init(&priv->stats_lock);
2165 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2166 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2167 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2168 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2169 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2170 callout_init(&priv->watchdog_timer, 1);
2171 #ifdef CONFIG_RFS_ACCEL
2172 INIT_LIST_HEAD(&priv->filters);
2173 spin_lock_init(&priv->filters_lock);
2176 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2179 priv->ddev = &mdev->pdev->dev;
2182 priv->port_up = false;
2183 priv->flags = prof->flags;
2185 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2186 priv->tx_ring_num = prof->tx_ring_num;
2187 priv->tx_ring = kcalloc(MAX_TX_RINGS,
2188 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
2189 if (!priv->tx_ring) {
2193 priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS,
2200 priv->rx_ring_num = prof->rx_ring_num;
2201 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2202 priv->mac_index = -1;
2203 priv->last_ifq_jiffies = 0;
2204 priv->if_counters_rx_errors = 0;
2205 priv->if_counters_rx_no_buffer = 0;
2206 #ifdef CONFIG_MLX4_EN_DCB
2207 if (!mlx4_is_slave(priv->mdev->dev)) {
2208 priv->dcbx_cap = DCB_CAP_DCBX_HOST;
2209 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
2210 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2211 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2213 en_info(priv, "QoS disabled - no HW support\n");
2214 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2219 /* Query for default mac and max mtu */
2220 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2221 priv->mac = mdev->dev->caps.def_mac[priv->port];
2222 if (ILLEGAL_MAC(priv->mac)) {
2223 #if BITS_PER_LONG == 64
2224 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
2225 priv->port, priv->mac);
2226 #elif BITS_PER_LONG == 32
2227 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
2228 priv->port, priv->mac);
2234 mlx4_en_sysctl_conf(priv);
2236 err = mlx4_en_alloc_resources(priv);
2240 /* Allocate page for receive rings */
2241 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2242 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2244 en_err(priv, "Failed to allocate page for rx qps\n");
2247 priv->allocated = 1;
2250 /* Set driver features */
2252 dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
2253 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
2254 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
2255 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
2256 dev->if_capabilities |= IFCAP_LRO;
2257 dev->if_capabilities |= IFCAP_HWSTATS;
2259 if (mdev->LSO_support)
2260 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;
2262 #if __FreeBSD_version >= 1100000
2263 /* set TSO limits so that we don't have to drop TX packets */
2264 dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
2265 dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
2266 dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
2269 dev->if_capenable = dev->if_capabilities;
2271 dev->if_hwassist = 0;
2272 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
2273 dev->if_hwassist |= CSUM_TSO;
2274 if (dev->if_capenable & IFCAP_TXCSUM)
2275 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2276 if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
2277 dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2280 /* Register for VLAN events */
2281 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
2282 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
2283 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
2284 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
2286 mdev->pndev[priv->port] = dev;
2288 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
2289 mlx4_en_set_default_moderation(priv);
2291 /* Set default MAC */
2292 for (i = 0; i < ETHER_ADDR_LEN; i++)
2293 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));
2296 ether_ifattach(dev, dev_addr);
2297 if_link_state_change(dev, LINK_STATE_DOWN);
2298 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
2299 mlx4_en_media_change, mlx4_en_media_status);
2300 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
2301 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
2302 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
2303 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
2304 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2305 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
2307 DEBUGNET_SET(dev, mlx4_en);
2309 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2310 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2312 priv->registered = 1;
2314 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2315 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2318 priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
2319 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2321 prof->tx_pause, prof->tx_ppp,
2322 prof->rx_pause, prof->rx_ppp);
2324 en_err(priv, "Failed setting port general configurations "
2325 "for port %d, with error %d\n", priv->port, err);
2330 en_warn(priv, "Initializing port\n");
2331 err = mlx4_INIT_PORT(mdev->dev, priv->port);
2333 en_err(priv, "Failed Initializing port\n");
2337 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2339 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2340 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);
2345 mlx4_en_destroy_netdev(dev);
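/*
 * Resize the RX and TX rings.  The requested sizes are rounded up to a
 * power of two and clamped to the supported range; if the port is up it
 * is stopped, the rings are reallocated with the new sizes and the port
 * is restarted.
 */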
2349 static int mlx4_en_set_ring_size(struct net_device *dev,
2350 int rx_size, int tx_size)
2352 struct mlx4_en_priv *priv = netdev_priv(dev);
2353 struct mlx4_en_dev *mdev = priv->mdev;
2357 rx_size = roundup_pow_of_two(rx_size);
2358 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
2359 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
2360 tx_size = roundup_pow_of_two(tx_size);
2361 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
2362 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
2364 if (rx_size == (priv->port_up ?
2365 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
2366 tx_size == priv->tx_ring[0]->size)
2368 mutex_lock(&mdev->state_lock);
2369 if (priv->port_up) {
2371 mlx4_en_stop_port(dev);
2373 mlx4_en_free_resources(priv);
2374 priv->prof->tx_ring_size = tx_size;
2375 priv->prof->rx_ring_size = rx_size;
2376 err = mlx4_en_alloc_resources(priv);
2378 en_err(priv, "Failed reallocating port resources\n");
2382 err = mlx4_en_start_port(dev);
2384 en_err(priv, "Failed starting port\n");
2387 mutex_unlock(&mdev->state_lock);
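/*
 * Sysctl handler for the RX ring size.  Reports the current profile value
 * and, when a new value is written, resizes the rings through
 * mlx4_en_set_ring_size().  Example usage from userland (the interface
 * name is illustrative and depends on the unit number):
 *
 *	sysctl hw.mlxen0.conf.rx_size=4096
 */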
2390 static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
2392 struct mlx4_en_priv *priv;
2397 size = priv->prof->rx_ring_size;
2398 error = sysctl_handle_int(oidp, &size, 0, req);
2399 if (error || !req->newptr)
2401 error = -mlx4_en_set_ring_size(priv->dev, size,
2402 priv->prof->tx_ring_size);
2406 static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
2408 struct mlx4_en_priv *priv;
2413 size = priv->prof->tx_ring_size;
2414 error = sysctl_handle_int(oidp, &size, 0, req);
2415 if (error || !req->newptr)
2417 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
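/*
 * Read the first two bytes of the cable module EEPROM (identifier and
 * revision) and translate them into the module type and EEPROM length
 * reported through the ethtool-style modinfo structure.
 */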
2423 static int mlx4_en_get_module_info(struct net_device *dev,
2424 struct ethtool_modinfo *modinfo)
2426 struct mlx4_en_priv *priv = netdev_priv(dev);
2427 struct mlx4_en_dev *mdev = priv->mdev;
2431 /* Read first 2 bytes to get Module & REV ID */
2432 ret = mlx4_get_module_info(mdev->dev, priv->port,
2433 0/*offset*/, 2/*size*/, data);
2436 en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
2440 switch (data[0] /* identifier */) {
2441 case MLX4_MODULE_ID_QSFP:
2442 modinfo->type = ETH_MODULE_SFF_8436;
2443 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2445 case MLX4_MODULE_ID_QSFP_PLUS:
2446 if (data[1] >= 0x3) { /* revision id */
2447 modinfo->type = ETH_MODULE_SFF_8636;
2448 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2450 modinfo->type = ETH_MODULE_SFF_8436;
2451 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2454 case MLX4_MODULE_ID_QSFP28:
2455 modinfo->type = ETH_MODULE_SFF_8636;
2456 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2458 case MLX4_MODULE_ID_SFP:
2459 modinfo->type = ETH_MODULE_SFF_8472;
2460 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2463 en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n");
2470 static int mlx4_en_get_module_eeprom(struct net_device *dev,
2471 struct ethtool_eeprom *ee,
2474 struct mlx4_en_priv *priv = netdev_priv(dev);
2475 struct mlx4_en_dev *mdev = priv->mdev;
2476 int offset = ee->offset;
2482 memset(data, 0, ee->len);
2484 while (i < ee->len) {
2486 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
2487 i, offset, ee->len - i);
2489 ret = mlx4_get_module_info(mdev->dev, priv->port,
2490 offset, ee->len - i, data + i);
2492 if (!ret) /* Done reading */
2497 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
2498 i, offset, ee->len - i, ret);
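/*
 * Dump the module EEPROM contents to the console as a hex table,
 * NUM_OF_BYTES (16) bytes per row, preceded by the row offset.
 */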
2508 static void mlx4_en_print_eeprom(u8 *data, __u32 len)
2513 const int NUM_OF_BYTES = 16;
2515 printf("\nOffset\t\tValues\n");
2516 printf("------\t\t------\n");
2518 printf("0x%04x\t\t",row);
2519 for(i=0; i < NUM_OF_BYTES; i++){
2520 printf("%02x ", data[j]);
2528 /* Read cable EEPROM module information by first inspecting the first
2529 * two bytes to get the length and then reading the rest of the information.
2530 * The information is printed to dmesg. */
2531 static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
2537 struct mlx4_en_priv *priv;
2538 struct net_device *dev;
2539 struct ethtool_modinfo modinfo;
2540 struct ethtool_eeprom ee;
2542 error = sysctl_handle_int(oidp, &result, 0, req);
2543 if (error || !req->newptr)
2549 data = kmalloc(PAGE_SIZE, GFP_KERNEL);
2551 error = mlx4_en_get_module_info(dev, &modinfo);
2554 "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
2559 ee.len = modinfo.eeprom_len;
2562 error = mlx4_en_get_module_eeprom(dev, &ee, data);
2565 "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
2567 /* Continue printing partial information in case of an error */
2570 /* EEPROM information will be printed in dmesg */
2571 mlx4_en_print_eeprom(data, ee.len);
2575 /* Return zero to prevent sysctl failure. */
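/*
 * Sysctl handler for the TX per-priority pause (PPP) bitmask.  Values are
 * limited to 0..0xff and are programmed into the port together with the
 * current pause and RX PPP settings via mlx4_SET_PORT_general().  Example
 * usage (interface name is illustrative):
 *
 *	sysctl hw.mlxen0.conf.tx_ppp=0xff
 */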
2579 static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
2581 struct mlx4_en_priv *priv;
2586 ppp = priv->prof->tx_ppp;
2587 error = sysctl_handle_int(oidp, &ppp, 0, req);
2588 if (error || !req->newptr)
2590 if (ppp > 0xff || ppp < 0)
2592 priv->prof->tx_ppp = ppp;
2593 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
2594 priv->rx_mb_size + ETHER_CRC_LEN,
2595 priv->prof->tx_pause,
2597 priv->prof->rx_pause,
2598 priv->prof->rx_ppp);
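/*
 * Sysctl handler for the RX per-priority pause (PPP) bitmask.  Unlike the
 * TX case, toggling RX PPP between off and on requires stopping the port
 * and reallocating its resources before the new value is programmed.
 */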
2603 static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
2605 struct mlx4_en_priv *priv;
2606 struct mlx4_en_dev *mdev;
2614 ppp = priv->prof->rx_ppp;
2615 error = sysctl_handle_int(oidp, &ppp, 0, req);
2616 if (error || !req->newptr)
2618 if (ppp > 0xff || ppp < 0)
2620 /* Toggling PPP between off and on requires restarting the port with reallocated resources. */
2621 if (!ppp != !priv->prof->rx_ppp) {
2622 mutex_lock(&mdev->state_lock);
2623 if (priv->port_up) {
2625 mlx4_en_stop_port(priv->dev);
2627 mlx4_en_free_resources(priv);
2628 priv->prof->rx_ppp = ppp;
2629 error = -mlx4_en_alloc_resources(priv);
2631 en_err(priv, "Failed reallocating port resources\n");
2632 if (error == 0 && port_up) {
2633 error = -mlx4_en_start_port(priv->dev);
2635 en_err(priv, "Failed starting port\n");
2637 mutex_unlock(&mdev->state_lock);
2641 priv->prof->rx_ppp = ppp;
2642 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
2643 priv->rx_mb_size + ETHER_CRC_LEN,
2644 priv->prof->tx_pause,
2646 priv->prof->rx_pause,
2647 priv->prof->rx_ppp);
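/*
 * Build the per-interface configuration sysctl tree.  The layout is
 * roughly (names as added below):
 *
 *	hw.<ifname>.conf.{msg_enable,rx_rings,tx_rings,rx_size,tx_size,
 *	    tx_ppp,rx_ppp,port_num,device_name,eeprom_info}
 *	hw.<ifname>.conf.coalesce.{pkt_rate_low,rx_usecs_low,pkt_rate_high,
 *	    rx_usecs_high,sample_interval,adaptive_rx_coal}
 */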
2652 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
2654 struct net_device *dev;
2655 struct sysctl_ctx_list *ctx;
2656 struct sysctl_oid *node;
2657 struct sysctl_oid_list *node_list;
2658 struct sysctl_oid *coal;
2659 struct sysctl_oid_list *coal_list;
2660 const char *pnameunit;
2662 ctx = &priv->conf_ctx;
2663 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);
2665 sysctl_ctx_init(ctx);
2666 priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
2667 OID_AUTO, dev->if_xname, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2668 "mlx4 10gig ethernet");
2669 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
2670 "conf", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Configuration");
2671 node_list = SYSCTL_CHILDREN(node);
2673 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
2674 CTLFLAG_RW, &priv->msg_enable, 0,
2675 "Driver message enable bitfield");
2676 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
2677 CTLFLAG_RD, &priv->rx_ring_num, 0,
2678 "Number of receive rings");
2679 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
2680 CTLFLAG_RD, &priv->tx_ring_num, 0,
2681 "Number of transmit rings");
2682 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
2683 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2684 mlx4_en_set_rx_ring_size, "I", "Receive ring size");
2685 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
2686 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2687 mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
2688 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
2689 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2690 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
2691 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
2692 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2693 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
2694 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
2695 CTLFLAG_RD, &priv->port, 0,
2697 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
2698 CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
2700 /* Add coalescer configuration. */
2701 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
2702 "coalesce", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2703 "Interrupt coalesce configuration");
2704 coal_list = SYSCTL_CHILDREN(coal);
2705 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
2706 CTLFLAG_RW, &priv->pkt_rate_low, 0,
2707 "Packets per-second for minimum delay");
2708 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
2709 CTLFLAG_RW, &priv->rx_usecs_low, 0,
2710 "Minimum RX delay in micro-seconds");
2711 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
2712 CTLFLAG_RW, &priv->pkt_rate_high, 0,
2713 "Packets per-second for maximum delay");
2714 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
2715 CTLFLAG_RW, &priv->rx_usecs_high, 0,
2716 "Maximum RX delay in micro-seconds");
2717 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
2718 CTLFLAG_RW, &priv->sample_interval, 0,
2719 "adaptive frequency in units of HZ ticks");
2720 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
2721 CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
2722 "Enable adaptive rx coalescing");
2723 /* EEPROM support */
2724 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
2725 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
2726 mlx4_en_read_eeprom, "I", "EEPROM information");
2729 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
2731 struct sysctl_ctx_list *ctx;
2732 struct sysctl_oid_list *node_list;
2733 struct sysctl_oid *ring_node;
2734 struct sysctl_oid_list *ring_list;
2735 struct mlx4_en_tx_ring *tx_ring;
2736 struct mlx4_en_rx_ring *rx_ring;
2740 ctx = &priv->stat_ctx;
2741 sysctl_ctx_init(ctx);
2742 priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
2743 "stat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
2744 node_list = SYSCTL_CHILDREN(priv->stat_sysctl);
2746 #ifdef MLX4_EN_PERF_STAT
2747 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
2748 &priv->pstats.tx_poll, "TX Poll calls");
2749 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
2750 &priv->pstats.tx_pktsz_avg, "TX average packet size");
2751 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
2752 &priv->pstats.inflight_avg, "TX average packets in-flight");
2753 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
2754 &priv->pstats.tx_coal_avg, "TX average coalesced completions");
2755 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
2756 &priv->pstats.rx_coal_avg, "RX average coalesced completions");
2759 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
2760 &priv->port_stats.tso_packets, 0, "TSO packets sent");
2761 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
2762 &priv->port_stats.queue_stopped, 0, "Queue full");
2763 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
2764 &priv->port_stats.wake_queue, 0, "Queue resumed after full");
2765 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
2766 &priv->port_stats.tx_timeout, 0, "Transmit timeouts");
2767 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
2768 &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed");
2769 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
2770 &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf");
2771 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
2772 &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success");
2773 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
2774 &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload");
2775 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
2776 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
2777 "TX checksum offloads");
2778 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
2779 CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
2780 "Oversized chains defragged");
2782 /* Could strdup the names and add in a loop. This is simpler. */
2783 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
2784 &priv->pkstats.rx_bytes, 0, "RX Bytes");
2785 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
2786 &priv->pkstats.rx_packets, 0, "RX packets");
2787 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
2788 &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets");
2789 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
2790 &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets");
2791 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
2792 &priv->pkstats.rx_errors, 0, "RX Errors");
2793 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
2794 &priv->pkstats.rx_dropped, 0, "RX Dropped");
2795 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
2796 &priv->pkstats.rx_length_errors, 0, "RX Length Errors");
2797 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
2798 &priv->pkstats.rx_over_errors, 0, "RX Over Errors");
2799 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
2800 &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
2801 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
2802 &priv->pkstats.rx_jabbers, 0, "RX Jabbers");
2804 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
2805 &priv->pkstats.rx_in_range_length_error, 0, "RX IN_Range Length Error");
2806 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
2807 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
2808 "RX Out Range Length Error");
2809 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
2810 &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Lt 64 Bytes Packets");
2811 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
2812 &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets");
2813 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
2814 &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets");
2815 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
2816 &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets");
2817 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
2818 &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets");
2819 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
2820 &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets");
2821 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
2822 &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets");
2823 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
2824 &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets");
2825 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
2826 &priv->pkstats.rx_gt_1548_bytes_packets, 0,
2827 "RX Greater Then 1548 bytes Packets");
2829 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
2830 &priv->pkstats.tx_packets, 0, "TX packets");
2831 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
2832 &priv->pkstats.tx_bytes, 0, "TX Bytes");
2833 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
2834 &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets");
2835 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
2836 &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets");
2837 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
2838 &priv->pkstats.tx_errors, 0, "TX Errors");
2839 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
2840 &priv->pkstats.tx_dropped, 0, "TX Dropped");
2841 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
2842 &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets");
2843 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
2844 &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets");
2845 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
2846 &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets");
2847 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
2848 &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets");
2849 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
2850 &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets");
2851 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
2852 &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets");
2853 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
2854 &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets");
2855 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
2856 &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets");
2857 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
2858 &priv->pkstats.tx_gt_1548_bytes_packets, 0,
2859 "TX Greater Then 1548 Bytes Packets");
2861 for (i = 0; i < priv->tx_ring_num; i++) {
2862 tx_ring = priv->tx_ring[i];
2863 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
2864 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
2865 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Ring");
2866 ring_list = SYSCTL_CHILDREN(ring_node);
2867 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
2868 CTLFLAG_RD, &tx_ring->packets, 0, "TX packets");
2869 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
2870 CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes");
2871 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets",
2872 CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets");
2873 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts",
2874 CTLFLAG_RD, &tx_ring->defrag_attempts, 0,
2875 "Oversized chains defragged");
2878 for (i = 0; i < priv->rx_ring_num; i++) {
2879 rx_ring = priv->rx_ring[i];
2880 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
2881 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
2882 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Ring");
2883 ring_list = SYSCTL_CHILDREN(ring_node);
2884 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
2885 CTLFLAG_RD, &rx_ring->packets, 0, "RX packets");
2886 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
2887 CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes");
2888 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error",
2889 CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors");
2895 mlx4_en_debugnet_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize)
2897 struct mlx4_en_priv *priv;
2899 priv = if_getsoftc(dev);
2900 mutex_lock(&priv->mdev->state_lock);
2901 *nrxr = priv->rx_ring_num;
2902 *ncl = DEBUGNET_MAX_IN_FLIGHT;
2903 *clsize = priv->rx_mb_size;
2904 mutex_unlock(&priv->mdev->state_lock);
2908 mlx4_en_debugnet_event(struct ifnet *dev, enum debugnet_ev event)
2913 mlx4_en_debugnet_transmit(struct ifnet *dev, struct mbuf *m)
2915 struct mlx4_en_priv *priv;
2918 priv = if_getsoftc(dev);
2919 if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2920 IFF_DRV_RUNNING || !priv->link_state)
2923 err = mlx4_en_xmit(priv, 0, &m);
2924 if (err != 0 && m != NULL)
2930 mlx4_en_debugnet_poll(struct ifnet *dev, int count)
2932 struct mlx4_en_priv *priv;
2934 priv = if_getsoftc(dev);
2935 if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state)
2938 mlx4_poll_interrupts(priv->mdev->dev);
2942 #endif /* DEBUGNET */