/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/fnv_hash.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"
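/*
 * Per-filter state kept by the driver.  The list links are used only by
 * hashfilters (TCAM filters live at fixed indices in their tid tables).
 * The L2T/SMT entries back DMAC/SMAC rewriting, and fs is a copy of the
 * specification the filter was created with.
 */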
struct filter_entry {
	LIST_ENTRY(filter_entry) link_4t;
	LIST_ENTRY(filter_entry) link_tid;

	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */
	struct smt_entry *smt;	/* SMT entry for SMAC rewrite */

	struct t4_filter_specification fs;
};
static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);
static bool
separate_hpfilter_region(struct adapter *sc)
{

	return (chip_id(sc) >= CHELSIO_T6);
}
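/*
 * 32b FNV-1 hash of a filter's 4-tuple.  16 bytes of each address are
 * hashed for an IPv6 filter, 4 for IPv4.
 */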
static inline uint32_t
hf_hashfn_4t(struct t4_filter_specification *fs)
{
	struct t4_filter_tuple *ft = &fs->val;
	uint32_t hash;

	if (fs->type) {
		/* IPv6 */
		hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 16, hash);
	} else {
		hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 4, hash);
	}
	hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
	hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);

	return (hash);
}
static inline uint32_t
hf_hashfn_tid(int tid)
{

	return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
}
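/*
 * Hashfilters are tracked in two hash tables: one keyed on the 4-tuple (so
 * that duplicates can be detected before the hardware assigns a tid) and
 * one keyed on the tid (for lookups when CPL replies arrive).
 */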
static int
alloc_hftid_hash(struct tid_info *t, int flags)
{
	int n;

	MPASS(t->ntids > 0);
	MPASS(t->hftid_hash_4t == NULL);
	MPASS(t->hftid_hash_tid == NULL);

	n = max(t->ntids / 1024, 16);
	t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
	if (t->hftid_hash_4t == NULL)
		return (ENOMEM);
	t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
	    flags);
	if (t->hftid_hash_tid == NULL) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
		return (ENOMEM);
	}

	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
	cv_init(&t->hftid_cv, "t4hfcv");

	return (0);
}
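/*
 * Tears down both hashfilter hash tables and the lock/cv that protect them.
 * Any filter still in the tables is freed here.
 */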
void
free_hftid_hash(struct tid_info *t)
{
	struct filter_entry *f, *ftmp;
	LIST_HEAD(, filter_entry) *head;
	int i;
#ifdef INVARIANTS
	int n = 0;
#endif

	if (t->tids_in_use > 0) {
		/* Remove everything from the tid hash. */
		head = t->hftid_hash_tid;
		for (i = 0; i <= t->hftid_tid_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
				LIST_REMOVE(f, link_tid);
			}
		}

		/* Remove and then free each filter in the 4t hash. */
		head = t->hftid_hash_4t;
		for (i = 0; i <= t->hftid_4t_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
#ifdef INVARIANTS
				n += f->fs.type ? 2 : 1;
#endif
				LIST_REMOVE(f, link_4t);
				free(f, M_CXGBE);
			}
		}
		MPASS(t->tids_in_use == n);
		t->tids_in_use = 0;
	}

	if (t->hftid_hash_4t) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
	}
	if (t->hftid_hash_tid) {
		hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
		t->hftid_hash_tid = NULL;
	}
	if (mtx_initialized(&t->hftid_lock)) {
		mtx_destroy(&t->hftid_lock);
		cv_destroy(&t->hftid_cv);
	}
}
static void
insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	if (hash == 0)
		hash = hf_hashfn_4t(&f->fs);
	LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
	atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}
static void
insert_hftid(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	uint32_t hash;

	MPASS(f->tid >= t->tid_base);
	MPASS(f->tid - t->tid_base < t->ntids);
	mtx_assert(&t->hftid_lock, MA_OWNED);

	hash = hf_hashfn_tid(f->tid);
	LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
}
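/*
 * Compare only the fields that participate in a hashfilter lookup.  The
 * masks are known to be identical because every hashfilter conforms to the
 * global filter mask.
 */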
static bool
filter_eq(struct t4_filter_specification *fs1,
    struct t4_filter_specification *fs2)
{
	int n;

	MPASS(fs1->hash && fs2->hash);

	if (fs1->type != fs2->type)
		return (false);

	n = fs1->type ? 16 : 4;
	if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
	    bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
	    fs1->val.sport != fs2->val.sport ||
	    fs1->val.dport != fs2->val.dport)
		return (false);

	/*
	 * We know the masks are the same because all hashfilters conform to the
	 * global tp->filter_mask and the driver has verified that already.
	 */

	if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
	    fs1->val.vnic != fs2->val.vnic)
		return (false);
	if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
		return (false);
	if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
		return (false);
	if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
		return (false);
	if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
		return (false);
	if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
		return (false);
	if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
		return (false);
	if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
		return (false);
	if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
		return (false);
	if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
		return (false);

	return (true);
}
static struct filter_entry *
lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
	struct filter_entry *f;

	mtx_assert(&t->hftid_lock, MA_OWNED);

	if (hash == 0)
		hash = hf_hashfn_4t(fs);

	LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
		if (filter_eq(&f->fs, fs))
			return (f);
	}

	return (NULL);
}
static struct filter_entry *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	struct filter_entry *f;
	uint32_t hash;

	mtx_assert(&t->hftid_lock, MA_OWNED);

	hash = hf_hashfn_tid(tid);
	LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
		if (f->tid == tid)
			return (f);
	}

	return (NULL);
}
static void
remove_hf(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);

	LIST_REMOVE(f, link_4t);
	atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
remove_hftid(struct adapter *sc, struct filter_entry *f)
{
#ifdef INVARIANTS
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);
#endif

	LIST_REMOVE(f, link_tid);
}
/*
 * Input: driver's 32b filter mode.
 * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
 */
static uint16_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}
/*
 * Input: driver's 32b filter mode.
 * Returns: hardware vnic mode (ingress config) matching the input.
 */
static int
mode_to_iconf(uint32_t mode)
{
	if ((mode & T4_FILTER_VNIC) == 0)
		return (-1);	/* ingress config doesn't matter. */

	if (mode & T4_FILTER_IC_VNIC)
		return (FW_VNIC_MODE_PF_VF);
	else if (mode & T4_FILTER_IC_ENCAP)
		return (FW_VNIC_MODE_ENCAP_EN);
	else
		return (FW_VNIC_MODE_OUTER_VLAN);
}
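/*
 * Checks that the requested combination of match fields is available in the
 * hardware's current filter mode and VNIC/ingress configuration.
 */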
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
			return (EINVAL);
		fconf |= F_VNIC_ID;
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
			return (EINVAL);
		fconf |= F_VNIC_ID;
	}

	if (fs->val.encap_vld || fs->mask.encap_vld) {
		if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN)
			return (EINVAL);
		fconf |= F_VNIC_ID;
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->filter_mode | fconf) != tpp->filter_mode)
		return (E2BIG);

	return (0);
}
/*
 * Input: hardware filter configuration (filter mode/mask, ingress config).
 * Returns: driver's 32b filter mode matching the input.
 */
static uint32_t
fconf_to_mode(uint16_t hwmode, int vnic_mode)
{
	uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

	if (hwmode & F_FRAGMENTATION)
		mode |= T4_FILTER_IP_FRAGMENT;
	if (hwmode & F_MPSHITTYPE)
		mode |= T4_FILTER_MPS_HIT_TYPE;
	if (hwmode & F_MACMATCH)
		mode |= T4_FILTER_MAC_IDX;
	if (hwmode & F_ETHERTYPE)
		mode |= T4_FILTER_ETH_TYPE;
	if (hwmode & F_PROTOCOL)
		mode |= T4_FILTER_IP_PROTO;
	if (hwmode & F_TOS)
		mode |= T4_FILTER_IP_TOS;
	if (hwmode & F_VLAN)
		mode |= T4_FILTER_VLAN;
	if (hwmode & F_VNIC_ID)
		mode |= T4_FILTER_VNIC;	/* real meaning depends on vnic_mode. */
	if (hwmode & F_PORT)
		mode |= T4_FILTER_PORT;
	if (hwmode & F_FCOE)
		mode |= T4_FILTER_FCoE;

	switch (vnic_mode) {
	case FW_VNIC_MODE_PF_VF:
		mode |= T4_FILTER_IC_VNIC;
		break;
	case FW_VNIC_MODE_ENCAP_EN:
		mode |= T4_FILTER_IC_ENCAP;
		break;
	case FW_VNIC_MODE_OUTER_VLAN:
	default:
		break;
	}

	return (mode);
}
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint16_t filter_mode;

	/* Filter mask must comply with the global filter mode. */
	MPASS((tp->filter_mode | tp->filter_mask) == tp->filter_mode);

	/* Non-zero incoming value in mode means "hashfilter mode". */
	filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
	*mode = fconf_to_mode(filter_mode, tp->vnic_mode);

	return (0);
}
int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tp = &sc->params.tp;
	int rc, iconf;
	uint16_t fconf;

	iconf = mode_to_iconf(mode);
	fconf = mode_to_fconf(mode);
	if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
		return (0);	/* Nothing to do */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setfm");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	if (sc->tids.ftids_in_use > 0 ||	/* TCAM filters active */
	    sc->tids.hpftids_in_use > 0 ||	/* hi-pri TCAM filters active */
	    sc->tids.tids_in_use > 0) {		/* TOE or hashfilters active */
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	/* Note that filter mask will get clipped to the new filter mode. */
	rc = -t4_set_filter_cfg(sc, fconf, -1, iconf);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
int
set_filter_mask(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tp = &sc->params.tp;
	int rc, iconf;
	uint16_t fmask;

	iconf = mode_to_iconf(mode);
	fmask = mode_to_fconf(mode);
	if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
		return (0);	/* Nothing to do */

	/*
	 * We aren't going to change the global filter mode or VNIC mode here.
	 * The given filter mask must conform to them.
	 */
	if ((fmask | tp->filter_mode) != tp->filter_mode)
		return (EINVAL);
	if (iconf != -1 && iconf != tp->vnic_mode)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sethfm");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	if (sc->tids.tids_in_use > 0) {		/* TOE or hashfilters active */
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif
	rc = -t4_set_filter_cfg(sc, -1, fmask, -1);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
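/*
 * Reads the hit count straight out of the filter's TCB via a memory window.
 * T4 keeps a 64b count at offset 16 of the TCB; later chips keep a 32b count
 * at offset 24.
 */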
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;
	uint64_t hits;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		hits = 0;
	else if (is_t4(sc)) {
		uint64_t t;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&t, 8);
		hits = be64toh(t);
	} else {
		uint32_t t;

		read_via_memwin(sc, 0, tcb_addr + 24, &t, 4);
		hits = be32toh(t);
	}
	mtx_unlock(&sc->reg_lock);

	return (hits);
}
int
get_filter(struct adapter *sc, struct t4_filter *t)
{

	if (t->fs.hash)
		return (get_hashfilter(sc, t));
	else
		return (get_tcamfilter(sc, t));
}
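/*
 * Program a TCAM filter at the requested index with a FILTER_WR (or a
 * FILTER2_WR when the firmware supports it) and wait for the firmware's
 * reply.  An IPv6 filter occupies four consecutive TCAM slots.
 */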
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	u_int tid;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		MPASS(t->idx < sc->tids.nhpftids);
		f = &sc->tids.hpftid_tab[t->idx];
		tid = sc->tids.hpftid_base + t->idx;
	} else {
		MPASS(t->idx < sc->tids.nftids);
		f = &sc->tids.ftid_tab[t->idx];
		tid = sc->tids.ftid_base + t->idx;
	}
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			if (separate_hpfilter_region(sc) && t->fs.prio)
				sc->tids.hpftids_in_use++;
			else
				sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0)
		return (rc);

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = tid;
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;
	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
	    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
	    V_FW_FILTER_WR_NOREPLY(0) |
	    V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
	    V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
	    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
	    V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
	    V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
	    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
	    V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
	    V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
	    V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		f->fs.newvlan == VLAN_REWRITE) |
	    V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		f->fs.newvlan == VLAN_REWRITE) |
	    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
	    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
	    V_FW_FILTER_WR_PRIO(f->fs.prio) |
	    V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
	    V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
	    V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
	    V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
	    V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
	    V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
	    V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
	    V_FW_FILTER_WR_PORT(f->fs.val.iport) |
	    V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
	    V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
	    V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
	bzero(fwr->sma, sizeof (fwr->sma));
	if (sc->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}
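/*
 * Builds the compressed filter tuple (the "ntuple") for a hashfilter from
 * the fields present in the hardware filter mask.  Fails unless the supplied
 * specification covers the hardware mask exactly.
 */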
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint16_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
		    tp->vlan_shift;
		fmask |= F_VLAN;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= F_PORT;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= F_PROTOCOL;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= F_TOS;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* vnic_mode was already validated. */
		if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
			MPASS(fs->mask.pfvf_vld);
		else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
			MPASS(fs->mask.ovlan_vld);
#ifdef notyet
		else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
			MPASS(fs->mask.encap_vld);
#endif
		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= F_VNIC_ID;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= F_MACMATCH;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= F_ETHERTYPE;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= F_MPSHITTYPE;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= F_FRAGMENTATION;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= F_FCOE;
	}

	/* A hashfilter must conform to the hardware filter mask. */
	if (fmask != tp->filter_mask)
		return (EINVAL);

	return (0);
}
static bool
is_4tuple_specified(struct t4_filter_specification *fs)
{
	int i;
	const int n = fs->type ? 16 : 4;

	if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
		return (false);

	for (i = 0; i < n; i++) {
		if (fs->mask.sip[i] != 0xff)
			return (false);
		if (fs->mask.dip[i] != 0xff)
			return (false);
	}

	return (true);
}
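/*
 * Entry point for the set-filter ioctl.  Validates the request, sets up the
 * adapter and tid tables on first use, allocates any L2T/SMT entries needed
 * for rewriting, and hands off to set_hashfilter or set_tcamfilter.
 */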
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te = NULL;
	struct smt_entry *smt = NULL;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		if (!is_4tuple_specified(&t->fs))
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (separate_hpfilter_region(sc) && t->fs.prio) {
			if (ti->nhpftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nhpftids)
				return (EINVAL);
		} else {
			if (ti->nftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nftids)
				return (EINVAL);
		}
		/* IPv6 filter idx must be 4 aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iqid/rss_info if not steering. */
	if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash &&
	    t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);
	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
		goto done;

	if (t->fs.hash) {
		if (__predict_false(ti->hftid_hash_4t == NULL)) {
			rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (separate_hpfilter_region(sc) && t->fs.prio &&
	    __predict_false(ti->hpftid_tab == NULL)) {
		MPASS(ti->nhpftids != 0);
		KASSERT(ti->hpftids_in_use == 0,
		    ("%s: no memory allocated but hpftids_in_use is %u",
		    __func__, ti->hpftids_in_use));
		ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
		    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->hpftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		MPASS(ti->nftids != 0);
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use is %u",
		    __func__, ti->ftids_in_use));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);
	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
		    t->fs.dmac);
		if (__predict_false(l2te == NULL)) {
			rc = EAGAIN;
			goto error;
		}
	}

	if (t->fs.newsmac) {
		/* This filter needs an SMT entry; allocate one. */
		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
		if (__predict_false(smt == NULL)) {
			rc = EAGAIN;
			goto error;
		}
		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
		if (rc)
			goto error;
	}

	if (t->fs.hash)
		rc = set_hashfilter(sc, t, ftuple, l2te, smt);
	else
		rc = set_tcamfilter(sc, t, l2te, smt);

	if (rc != 0 && rc != EINPROGRESS) {
error:
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
	}
	return (rc);
}
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc, nfilters;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	mtx_lock(&sc->tids.ftid_lock);
	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}
	MPASS(f != NULL);	/* Caller checked this. */
	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}
	f += t->idx;

	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == tid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}
int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks for tid tables ensure that the locks that del_* will reach
	 * for are initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_hash_4t != NULL)
			return (del_hashfilter(sc, t));
	} else if (separate_hpfilter_region(sc) && t->fs.prio) {
		if (sc->tids.hpftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}
/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
	if (f->smt) {
		t4_smt_release(f->smt);
		f->smt = NULL;
	}
}
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);

	return (0);
}
/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, no_reply));
}
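/*
 * Reply to a FILTER_WR: wakes up the thread waiting in set_tcamfilter or
 * del_tcamfilter and updates the filter's state to match the firmware's
 * verdict.
 */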
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_hpftid(sc, tid)) {
		idx = tid - sc->tids.hpftid_base;
		f = &sc->tids.hpftid_tab[idx];
	} else if (is_ftid(sc, tid)) {
		idx = tid - sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
	} else
		panic("%s: FW reply for invalid TID %d.", __func__, tid);

	MPASS(f->tid == tid);
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, tid));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		if (f->fs.newsmac) {
			MPASS(f->smt != NULL);
			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
			    V_TCB_SMAC_SEL(f->smt->idx), 1);
			/* XXX: wait for reply to TCB update before !pending */
		}
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		break;
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		if (separate_hpfilter_region(sc) && f->fs.prio)
			sc->tids.hpftids_in_use--;
		else
			sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}
/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		f->tid = GET_TID(cpl);
		MPASS(lookup_hftid(sc, f->tid) == NULL);
		insert_hftid(sc, f);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		f->valid = 0;
		f->pending = 0;
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
		free_filter_resources(f);
		remove_hf(sc, f);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);

	free_atid(sc, atid);
	return (0);
}
int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));

	if (rpl->status == 0) {
		f->valid = 1;
		f->pending = 0;
	} else {
		f->tid = EIO;
		f->valid = 0;
		f->pending = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}
int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));

	if (cpl->status == 0) {
		f->valid = 0;
		f->pending = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	} else
		f->pending = 0;
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}
static int
get_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters;
	struct filter_entry *f;
	u_int in_use;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	MPASS(!t->fs.hash);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
		in_use = sc->tids.hpftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
		in_use = sc->tids.ftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}

	if (in_use == 0 || f == NULL || t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	f += t->idx;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			MPASS(f->tid == tid_base + i);
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}
static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	int tid;
	struct filter_entry *f;
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(t->fs.hash);

	if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
	    t->idx >= inv_tid) {
		t->idx = 0xffffffff;
		return (0);
	}
	if (t->idx < ti->tid_base)
		t->idx = ti->tid_base;

	mtx_lock(&ti->hftid_lock);
	for (tid = t->idx; tid < inv_tid; tid++) {
		f = lookup_hftid(sc, tid);
		if (f != NULL && f->valid) {
			t->idx = tid;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&ti->hftid_lock);
	return (0);
}
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}
static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}
static u_int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}
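/*
 * Program a hashfilter by sending an Active Open.  The hardware, not the
 * driver, picks the tid; the reply is handled by t4_hashfilter_ao_rpl and
 * any remaining TCB updates by t4_hashfilter_tcb_rpl.
 */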
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;
	uint32_t hash;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	hash = hf_hashfn_4t(&t->fs);

	mtx_lock(&sc->tids.hftid_lock);
	if (lookup_hf(sc, &t->fs, hash) != NULL) {
		rc = EEXIST;
		goto done;
	}

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, ftuple, wr);
	else
		mk_act_open_req(sc, f, atid, ftuple, wr);

	f->locked = 1;	/* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	insert_hf(sc, f, hash);
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;	/* errno stored by the ithread */
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
	}

	return (ulpsc);
}
/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
	}

	return (ulpsc);
}
/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
	}

	return (ulpsc);
}
static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}
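/*
 * A hashfilter is deleted with a single work request carrying three ULP
 * commands: a SET_TCB_FIELD that steers the tid's CPL replies to the given
 * queue, then an ABORT_REQ/ABORT_RPL pair.  The firmware's CPL_ABORT_RPL_RSS
 * completes the delete.
 */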
static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(sc->tids.hftid_hash_4t != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
		return (EINVAL);

	mtx_lock(&ti->hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = EIO;
				f->locked = 0;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&ti->hftid_lock);
	return (rc);
}
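/*
 * Writes the NAT rewrite addresses and ports into the filter's TCB.  The TCB
 * words borrowed for this (SND_UNA_RAW, RX_FRAG2_PTR_RAW, etc.) hold the new
 * destination/source IP, and PDU_HDR_LEN holds the new ports.
 */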
#define WORD_MASK	0xffffffff
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
			    f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
			    f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 1, WORD_MASK,
			    f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
			    f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 2, WORD_MASK,
			    f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
			    f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 3, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
			    f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
			    f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
			    f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
			    f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
			    f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
			    f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		}
	}

	set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
	    (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}
/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		MPASS(f->smt != NULL);
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
		    1);
		updated++;
	}

	switch (f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT entry,
	 * etc.) are setup so that any switched packet will use correct
	 * values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}