/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "mlx4.h"

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
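
/*
 * Layout note: an MGM entry begins with a 32-byte header (next GID index,
 * members count, two reserved words and the 16-byte GID), followed by one
 * 4-byte QPN per member, hence
 *	qp_per_mgm = (entry_size - 32) / 4 = 4 * (entry_size / 16 - 2).
 * For example, a 128-byte entry holds 4 * (128 / 16 - 2) = 24 QPNs.
 */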

static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (!err)
		*reg_id = imm;

	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_promisc_qp *pqp;

	if (port < 1 || port > dev->caps.num_ports)
		return NULL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}

/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this VEP, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address
	 */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
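
/*
 * Encoding note: the members_count word of an MGM entry carries the
 * protocol in its top two bits and the actual member count in the low
 * 24 bits, which is why it is always split and rebuilt as:
 *
 *	count = be32_to_cpu(mgm->members_count) & 0xffffff;
 *	prot  = be32_to_cpu(mgm->members_count) >> 30;
 *	mgm->members_count = cpu_to_be32(count | (prot << 30));
 */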

/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n",
			  index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn
	 * we need to add it as a duplicate to this entry
	 * for future references
	 */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}

/* Check whether a qpn is a duplicate on a steering entry.
 * If so, it should not be removed from the mgm.
 */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate
	 */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n",
			  index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}

	return true;
}

/* Returns true if all the QPs != tqpn contained in this entry
 * are promisc QPs; returns false otherwise.
 */
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 tqpn,
				   u32 *members_count)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 m_count;
	bool ret = false;
	int i;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count)
		*members_count = m_count;

	for (i = 0; i < m_count; i++) {
		u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	ret = true;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 members_count;
	bool ret = false;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	if (!promisc_steering_entry(dev, port, steer, index, tqpn,
				    &members_count))
		goto out;

	/* All the qps currently registered for this entry are promiscuous,
	 * checking for duplicates
	 */
	list_for_each_entry_safe(entry, tmp_entry,
				 &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates) ||
			    members_count == 1) {
				struct mlx4_promisc_qp *pqp, *tmp_pqp;
				/* If there is only 1 entry in duplicates then
				 * this is the QP we want to delete, going over
				 * the list and deleting the entry.
				 */
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
				ret = true;
				goto out;
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	return ret;
}

static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0; /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof(*pqp), GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* The promisc qp needs to be added for each one of the
		 * steering entries; if it already exists, it needs to be
		 * added as a duplicate for this entry.
		 */
		list_for_each_entry(entry, &s_steer->steer_entries[steer],
				    list) {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;

			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			prot = be32_to_cpu(mgm->members_count) >> 30;
			found = false;
			for (i = 0; i < members_count; i++) {
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
					/* Entry already exists, add to duplicates */
					dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
					if (!dqp) {
						err = -ENOMEM;
						goto out_mailbox;
					}
					dqp->qpn = qpn;
					list_add_tail(&dqp->list,
						      &entry->duplicates);
					found = true;
				}
			}
			if (!found) {
				/* Need to add the qpn to mgm */
				if (members_count == dev->caps.num_qp_per_mgm) {
					/* entry is full */
					err = -ENOMEM;
					goto out_mailbox;
				}
				mgm->qp[members_count++] =
					cpu_to_be32(qpn & MGM_QPN_MASK);
				mgm->members_count =
					cpu_to_be32(members_count | (prot << 30));
				err = mlx4_WRITE_ENTRY(dev, entry->index,
						       mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof(*mgm));
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* entry is full */
			err = -ENOMEM;
			goto out_list;
		}
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	}
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int i;
	int loc = -1;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_mutex;
	}
	mgm = mailbox->buf;
	memset(mgm, 0, sizeof(*mgm));
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* remove the qp from all the steering entries */
		list_for_each_entry_safe(entry, tmp_entry,
					 &s_steer->steer_entries[steer], list) {
			found = false;
			list_for_each_entry(dqp, &entry->duplicates, list) {
				if (dqp->qpn == qpn) {
					found = true;
					break;
				}
			}
			if (found) {
				/* a duplicate, no need to change the mgm,
				 * only update the duplicates list
				 */
				list_del(&dqp->list);
				kfree(dqp);
			} else {
				err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
				if (err)
					goto out_mailbox;
				members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
				if (!members_count) {
					mlx4_warn(dev, "QP %06x wasn't found in entry %x, mcount=0; deleting entry...\n",
						  qpn, entry->index);
					list_del(&entry->list);
					kfree(entry);
					continue;
				}

				for (i = 0; i < members_count; ++i)
					if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
						loc = i;
						break;
					}

				if (loc < 0) {
					mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
						 qpn, entry->index);
					err = -EINVAL;
					goto out_mailbox;
				}

				/* copy the last QP in this MGM over removed QP */
				mgm->qp[loc] = mgm->qp[members_count - 1];
				mgm->qp[members_count - 1] = 0;
				mgm->members_count = cpu_to_be32(--members_count |
								 (MLX4_PROT_ETH << 30));

				err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_mutex:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

/*
 * Caller must hold MCG table semaphore. gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	mlx4_dbg(dev, "Hash for "GID_PRINT_FMT" is %04x\n",
		 GID_PRINT_ARGS(gid), hash);

	*index = hash;
	*prev  = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
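
/*
 * Example of the chain walk above: for hash value h, the search starts
 * at MGM[h] and follows next_gid_index (stored shifted left by 6) into
 * the AMGM area, e.g. MGM[h] -> AMGM[i1] -> AMGM[i2] -> 0, stopping at
 * a GID/protocol match, at a zero entry, or at the end of the chain.
 */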

static const u8 __promisc_mode[] = {
	[MLX4_FS_REGULAR]	= 0x0,
	[MLX4_FS_ALL_DEFAULT]	= 0x1,
	[MLX4_FS_MC_DEFAULT]	= 0x3,
	[MLX4_FS_UC_SNIFFER]	= 0x4,
	[MLX4_FS_MC_SNIFFER]	= 0x5,
};

int map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
			       enum mlx4_net_trans_promisc_mode flow_type)
{
	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
		return -EINVAL;
	}
	return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_mode);
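
/*
 * Usage sketch (hypothetical caller): resolve the hardware encoding of a
 * promiscuous mode before building a rule header, e.g.:
 *
 *	int hw_mode = map_sw_to_hw_steering_mode(dev, MLX4_FS_MC_SNIFFER);
 *
 *	if (hw_mode < 0)
 *		return hw_mode;		(-EINVAL on an unknown mode)
 */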

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	u8 flags = 0;

	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	flags |= ctrl->exclusive ? (1 << 2) : 0;
	flags |= ctrl->allow_loopback ? (1 << 3) : 0;

	hw->flags = flags;
	hw->type = __promisc_mode[ctrl->promisc_mode];
	hw->prio = cpu_to_be16(ctrl->priority);
	hw->port = ctrl->port;
	hw->qpn = cpu_to_be32(ctrl->qpn);
}
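
/*
 * The ctrl flags byte assembled above is laid out as:
 *	bit 0 - queue mode (0 = FIFO, 1 = LIFO)
 *	bit 2 - exclusive rule
 *	bit 3 - allow loopback
 */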

const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]	= 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]	= 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6]	= 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4]	= 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]	= 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]	= 0xE006
};

int map_sw_to_hw_steering_id(struct mlx4_dev *dev,
			     enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_id);

static const int __rule_hw_sz[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH] =
		sizeof(struct mlx4_net_trans_rule_hw_eth),
	[MLX4_NET_TRANS_RULE_ID_IB] =
		sizeof(struct mlx4_net_trans_rule_hw_ib),
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
	[MLX4_NET_TRANS_RULE_ID_IPV4] =
		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
	[MLX4_NET_TRANS_RULE_ID_TCP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_UDP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
};

int hw_rule_sz(struct mlx4_dev *dev,
	       enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}

	return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(hw_rule_sz);

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	if (hw_rule_sz(dev, spec->id) < 0)
		return -EINVAL;
	memset(rule_hw, 0, hw_rule_sz(dev, spec->id));
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = hw_rule_sz(dev, spec->id) >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}
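
/*
 * Wire format assembled by the caller of parse_trans_rule(): a
 * mlx4_net_trans_rule_hw_ctrl header followed by one _rule_hw block per
 * specification on the rule's list. Each block stores its own length in
 * 4-byte units in rule_hw->size, and parse_trans_rule() returns that
 * length in bytes so the caller can advance its write offset.
 */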

static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = "GID_PRINT_FMT"\n",
					GID_PRINT_ARGS(cur->ib.dst_gid));
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = "GID_PRINT_FMT"\n",
					GID_PRINT_ARGS(cur->ib.dst_gid_msk));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}

int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return ret;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM)
		mlx4_err_rule(dev,
			      "mcg table is full. Fail to register network rule.\n",
			      rule);
	else if (ret)
		mlx4_err_rule(dev, "Fail to register network rule.\n", rule);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
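
/*
 * Usage sketch (hypothetical caller, error handling elided): attach a rule
 * steering one destination MAC to a QP, then detach it by the returned
 * registration id. mlx4_trans_to_dmfs_attach() below builds rules the same
 * way.
 *
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
 *		.exclusive	= 0,
 *		.allow_loopback	= 1,
 *		.promisc_mode	= MLX4_FS_REGULAR,
 *		.port		= port,
 *		.priority	= MLX4_DOMAIN_NIC,
 *		.qpn		= qpn,
 *	};
 *	struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
 *	u64 reg_id;
 *
 *	memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
 *	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
 *	INIT_LIST_HEAD(&rule.list);
 *	list_add_tail(&spec.list, &rule.list);
 *	mlx4_flow_attach(dev, &rule, &reg_id);
 *	...
 *	mlx4_flow_detach(dev, reg_id);
 */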

int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
			 (unsigned long long)reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
				      u32 max_range_qpn)
{
	int err;
	u64 in_param;

	in_param = ((u64) min_range_qpn) << 32;
	in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;

	err = mlx4_cmd(dev, in_param, 0, 0,
		       MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
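
/*
 * in_param packing used above: the lower QPN bound travels in the high
 * dword and the upper bound in the low dword, e.g. for the range
 * [0x40, 0x7f] the firmware sees in_param = 0x000000400000007f.
 */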

int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index = -1, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof(*mgm));
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n",
				 qp->qpn);
			err = 0;
			goto out;
		}

	mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
					       (!!mlx4_blck_lb << MGM_BLCK_LB_BIT));

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	/* if !link, still add the new entry. */
	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d\n",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc = -1;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID "GID_PRINT_FMT" not found\n",
			 GID_PRINT_ARGS(gid));
		err = -EINVAL;
		goto out;
	}

	/* If this QP is also a promisc QP, it should not be removed from
	 * the MGM as long as at least one non-promisc QP is still attached
	 * to this MCG.
	 */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
	    !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			loc = i;
			break;
		}

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP in this MGM over removed QP */
	mgm->qp[loc] = mgm->qp[members_count - 1];
	mgm->qp[members_count - 1] = 0;
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms,
						 MLX4_USE_RR);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
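
/*
 * in_modifier packing used above: bits 0-23 carry the QPN, the protocol
 * is shifted into bit 28, and bit 31 flags loopback blocking on attach;
 * the attach/detach selector travels in the command's op_modifier.
 */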

int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;

	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}
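
/*
 * For Ethernet, the attach "gid" is a container: bytes 10..15 hold the
 * destination MAC, which is why the rule built above copies ETH_ALEN
 * bytes starting at &gid[10] into the L2 spec, and why the attach and
 * detach paths below classify via is_valid_ether_addr(&gid[10]).
 */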

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	enum mlx4_steer_type steer;
	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (steer << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_mcast_loopback,
						 prot, reg_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	enum mlx4_steer_type steer;
	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (steer << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
	};
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);

int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc for VFs */
	if ((slave != dev->caps.function) && (steer == MLX4_UC_STEER))
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when fw manages the mcg table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;

	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}