/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/module.h>

#include "mlx4.h"

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

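/*
 * The first 32 bytes of an MGM entry hold the entry header and the 16-byte
 * GID; every remaining 16-byte chunk carries four 32-bit QP entries, which
 * is what the arithmetic below reflects.
 */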
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}

static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size, u64 *reg_id)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (!err)
		*reg_id = imm;

	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (!err)
		*hash = imm;

	return err;
}

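/*
 * Bookkeeping for the non-device-managed steering modes: each port keeps,
 * per steering type, a list of promiscuous QPs (promisc_qps[]) and a list
 * of MGM entries owned by this driver (steer_entries[]); the helpers below
 * keep the hardware MGM table and these lists in sync.
 */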
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_promisc_qp *pqp;

	if (port < 1 || port > dev->caps.num_ports)
		return NULL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}

	return NULL;
}

/*
 * Add a new entry to the steering data structure.
 * All promiscuous QPs should be added to it as well.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promiscuous QP,
	 * it should be inserted into the duplicates list.
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if there are no promiscuous QPs for this port, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* Now we need to add all the promiscuous QPs to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add an already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}

/* Update the data structures with an existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n",
			  index);
		return -EINVAL;
	}

	/* The given qpn is listed as a promiscuous QP; we need to add it
	 * as a duplicate to this entry for future references.
	 */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}

/* Check whether a qpn is a duplicate on a steering entry.
 * If so, it should not be removed from the MGM.
 */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is a promiscuous QP, so it is a duplicate on this index.
	 * Find the index entry and remove the duplicate.
	 */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n",
			  index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}

/* Returns true if all the QPs != tqpn contained in this entry
 * are promiscuous QPs; returns false otherwise.
 */
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 tqpn, u32 *members_count)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 m_count;
	bool ret = false;
	int i;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count)
		*members_count = m_count;

	for (i = 0; i < m_count; i++) {
		u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;

		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	ret = true;
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 members_count;
	bool ret = false;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	if (!promisc_steering_entry(dev, port, steer, index, tqpn, &members_count))
		return false;

	/* All the QPs currently registered for this entry are promiscuous;
	 * check for duplicates.
	 */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates) || members_count == 1) {
				struct mlx4_promisc_qp *pqp, *tmp_pqp;
				/* If there is only one entry in duplicates, then
				 * this is the QP we want to delete; go over
				 * the list and delete the entries.
				 */
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates, list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0; /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof(*pqp), GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* The promisc QP needs to be added for each one of the steering
		 * entries; if it already exists, it needs to be added as
		 * a duplicate for this entry.
		 */
		list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;

			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			prot = be32_to_cpu(mgm->members_count) >> 30;
			found = false;
			for (i = 0; i < members_count; i++) {
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
					/* Entry already exists, add to duplicates */
					dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
					if (!dqp) {
						err = -ENOMEM;
						goto out_mailbox;
					}
					dqp->qpn = qpn;
					list_add_tail(&dqp->list, &entry->duplicates);
					found = true;
				}
			}
			if (!found) {
				/* Need to add the qpn to mgm */
				if (members_count == dev->caps.num_qp_per_mgm) {
					/* entry is full */
					err = -ENOMEM;
					goto out_mailbox;
				}
				mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
				mgm->members_count = cpu_to_be32(members_count | (prot << 30));
				err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof(*mgm));
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* entry is full */
			err = -ENOMEM;
			goto out_list;
		}
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	}
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

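/*
 * Remove a promiscuous QP: rebuild the default steering entry without it and
 * drop it from every MGM entry where it was only present because of
 * promiscuous mode (duplicates are simply removed from the bookkeeping).
 */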
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int i;
	int loc = -1;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	memset(mgm, 0, sizeof(*mgm));
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* remove the qp from all the steering entries */
		list_for_each_entry_safe(entry, tmp_entry,
					 &s_steer->steer_entries[steer], list) {
			found = false;
			list_for_each_entry(dqp, &entry->duplicates, list) {
				if (dqp->qpn == qpn) {
					found = true;
					break;
				}
			}
			if (found) {
				/* A duplicate, no need to change the MGM,
				 * only update the duplicates list.
				 */
				list_del(&dqp->list);
				kfree(dqp);
			} else {
				err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
				if (err)
					goto out_mailbox;
				members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
				if (!members_count) {
					mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n",
						  qpn, entry->index);
					list_del(&entry->list);
					kfree(entry);
					continue;
				}

				loc = -1;
				for (i = 0; i < members_count; ++i)
					if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
						loc = i;
						break;
					}

				if (loc < 0) {
					mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
						 qpn, entry->index);
					err = -EINVAL;
					goto out_mailbox;
				}

				/* copy the last QP in this MGM over removed QP */
				mgm->qp[loc] = mgm->qp[members_count - 1];
				mgm->qp[members_count - 1] = 0;
				mgm->members_count = cpu_to_be32(--members_count |
								 (MLX4_PROT_ETH << 30));

				err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

/*
 * Caller must hold MCG table semaphore. gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */

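/*
 * Note: the AMGM link of an entry lives in the upper bits of next_gid_index;
 * the code below therefore shifts by 6 when converting between the stored
 * value and an actual table index.
 */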
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}

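/*
 * Translation tables from the software flow-steering enums to the values
 * programmed into the hardware rule descriptors (device-managed steering).
 */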
static const u8 __promisc_mode[] = {
	[MLX4_FS_REGULAR]	= 0x0,
	[MLX4_FS_ALL_DEFAULT]	= 0x1,
	[MLX4_FS_MC_DEFAULT]	= 0x3,
	[MLX4_FS_UC_SNIFFER]	= 0x4,
	[MLX4_FS_MC_SNIFFER]	= 0x5,
};

int map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
			       enum mlx4_net_trans_promisc_mode flow_type)
{
	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
		return -EINVAL;
	}
	return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_mode);

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	u8 flags = 0;

	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	flags |= ctrl->exclusive ? (1 << 2) : 0;
	flags |= ctrl->allow_loopback ? (1 << 3) : 0;

	hw->flags = flags;
	hw->type = __promisc_mode[ctrl->promisc_mode];
	hw->prio = cpu_to_be16(ctrl->priority);
	hw->port = ctrl->port;
	hw->qpn = cpu_to_be32(ctrl->qpn);
}

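/*
 * Hardware IDs of the per-protocol rule specification headers;
 * parse_trans_rule() stamps these into each spec segment it builds.
 */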
const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]	= 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]	= 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6]	= 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4]	= 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]	= 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]	= 0xE006
};

int map_sw_to_hw_steering_id(struct mlx4_dev *dev,
			     enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_id);

static const int __rule_hw_sz[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH] =
		sizeof(struct mlx4_net_trans_rule_hw_eth),
	[MLX4_NET_TRANS_RULE_ID_IB] =
		sizeof(struct mlx4_net_trans_rule_hw_ib),
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
	[MLX4_NET_TRANS_RULE_ID_IPV4] =
		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
	[MLX4_NET_TRANS_RULE_ID_TCP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_UDP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
};

int hw_rule_sz(struct mlx4_dev *dev,
	       enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(hw_rule_sz);

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	if (hw_rule_sz(dev, spec->id) < 0)
		return -EINVAL;
	memset(rule_hw, 0, hw_rule_sz(dev, spec->id));
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = hw_rule_sz(dev, spec->id) >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}

static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = %pI6\n", cur->ib.dst_gid);
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = %pI6\n",
					cur->ib.dst_gid_msk);
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
#undef BUF_SIZE
}

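/*
 * A flow-steering attach mailbox is a mlx4_net_trans_rule_hw_ctrl header
 * followed by one hardware spec segment per element of rule->list; the total
 * size is passed to the firmware command in dwords (hence "size >> 2").
 */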
int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	int size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return ret;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM)
		mlx4_err_rule(dev,
			      "mcg table is full. Failed to register network rule\n",
			      rule);
	else if (ret)
		mlx4_err_rule(dev, "Failed to register network rule\n", rule);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);

int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Failed to detach network rule. registration id = 0x%llx\n",
			 (unsigned long long)reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
				      u32 max_range_qpn)
{
	int err;
	u64 in_param;

	in_param = ((u64) min_range_qpn) << 32;
	in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;

	err = mlx4_cmd(dev, in_param, 0, 0,
		       MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);

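/*
 * Attach a QP to a group in the A0/B0 steering modes: hash the GID, walk the
 * MGM/AMGM chain with find_entry(), append the QP (allocating and linking a
 * new AMGM entry if the GID is not present), then update the promiscuous
 * steering bookkeeping for Ethernet.
 */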
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index = -1, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof(*mgm));
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
					       (!!mlx4_blck_lb << MGM_BLCK_LB_BIT));

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	/* if !link, the new entry was added to an existing MGM; we are done */
	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}

out:
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d\n",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc = -1;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* If this QP is also a promisc QP, it shouldn't be removed only if
	 * at least one non-promisc QP is also attached to this MCG.
	 */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
	    !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			loc = i;
			break;
		}

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP in this MGM over removed QP */
	mgm->qp[loc] = mgm->qp[members_count - 1];
	mgm->qp[members_count - 1] = 0;
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;

		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms,
						 MLX4_USE_RR);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;

		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms,
					 MLX4_USE_RR);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

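/*
 * Translate a legacy attach request into a device-managed (DMFS) flow rule:
 * for Ethernet the destination MAC is taken from bytes 10..15 of the "gid",
 * for IB the full 16-byte GID is matched.
 */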
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;

	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	enum mlx4_steer_type steer;

	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (steer << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     steer);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_mcast_loopback,
						 prot, reg_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	enum mlx4_steer_type steer;

	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (steer << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     steer);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule;
	u64 *regid_p;

	memset(&rule, 0, sizeof(rule));

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);

int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc for VFs */
	if ((slave != dev->caps.function) && (steer == MLX4_UC_STEER))
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

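/*
 * The mcg_table bitmap only manages AMGM (overflow) entries; indices it hands
 * out are offset by dev->caps.num_mgms before use, as seen in
 * mlx4_qp_attach_common() above.
 */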
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when FW manages the MCG table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;

	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}