/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */

	struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *,
    struct l2t_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

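/*
 * hftid_tab maps a hashfilter's tid to its filter_entry and doubles as the
 * tids_in_use accounting for hashfilters (callers pass ntids = 2 for an IPv6
 * filter and 1 for IPv4).  Callers are expected to hold the hftid_lock.
 */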
static void
insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->hftid_tab[tid] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

static void *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->hftid_tab[tid]);
}

static void
remove_hftid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->hftid_tab[tid] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

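/*
 * fconf and iconf are the driver's cached views of the TP_VLAN_PRI_MAP and
 * TP_INGRESS_CONFIG register fields; T4_FILTER_xxx is the view exported via
 * the ioctl interface.  The 4-tuple and IP version are always part of the
 * filter mode and do not depend on fconf.
 */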
static uint32_t
fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
{
	uint32_t mode;

	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

	if (fconf & F_FRAGMENTATION)
		mode |= T4_FILTER_IP_FRAGMENT;

	if (fconf & F_MPSHITTYPE)
		mode |= T4_FILTER_MPS_HIT_TYPE;

	if (fconf & F_MACMATCH)
		mode |= T4_FILTER_MAC_IDX;

	if (fconf & F_ETHERTYPE)
		mode |= T4_FILTER_ETH_TYPE;

	if (fconf & F_PROTOCOL)
		mode |= T4_FILTER_IP_PROTO;

	if (fconf & F_TOS)
		mode |= T4_FILTER_IP_TOS;

	if (fconf & F_VLAN)
		mode |= T4_FILTER_VLAN;

	if (fconf & F_VNIC_ID) {
		mode |= T4_FILTER_VNIC;
		if (iconf & F_VNIC)
			mode |= T4_FILTER_IC_VNIC;
	}

	if (fconf & F_PORT)
		mode |= T4_FILTER_PORT;

	if (fconf & F_FCOE)
		mode |= T4_FILTER_FCoE;

	return (mode);
}

static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}

static uint32_t
mode_to_iconf(uint32_t mode)
{

	if (mode & T4_FILTER_IC_VNIC)
		return (F_VNIC);
	return (0);
}

static int check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tpp = &sc->params.tp;

	/*
	 * We trust the cached values of the relevant TP registers.  This means
	 * things work reliably only if writes to those registers are always via
	 * t4_set_filter_mode.
	 */
	*mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);

	return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
		 * thoroughly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

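/*
 * Hit counts are read straight out of the filter's TCB in adapter memory.
 * The two reads below differ in both offset and width because T4 keeps a
 * 64-bit count at byte 16 of the TCB while T5 and later keep a 32-bit count
 * at byte 24.
 */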
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	if (t->fs.hash)
		return (get_hashfilter(sc, t));

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	MPASS(f->tid == sc->tids.ftid_base + t->idx);
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

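/*
 * Program a TCAM filter via a FW_FILTER_WR work request and wait for the
 * firmware's reply (delivered to t4_filter_rpl).  An IPv6 filter occupies 4
 * consecutive TCAM slots, hence ntids below is 4 for type 1 filters, and all
 * the slots must be idle and unlocked before the work request is sent.
 */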
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	MPASS(t->idx < sc->tids.nftids);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	f = &sc->tids.ftid_tab[t->idx];
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16),
		    &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0) {
		if (l2te)
			t4_l2t_release(l2te);
		return (rc);
	}

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = sc->tids.ftid_base + t->idx;
	f->fs = t->fs;
	f->l2te = l2te;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
	    V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
	    V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
	    V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac) {
		/* XXX: need to use SMT idx instead */
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
	}
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);

	return (rc);
}

int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		if (t->idx != (uint32_t)-1)
			return (EINVAL);	/* hw, not user picks the idx */
	} else {
		if (ti->nftids == 0)
			return (ENOTSUP);
		if (t->idx >= ti->nftids)
			return (EINVAL);
		/* IPv6 filter idx must be 4 aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support removing VLAN tags for loopback filters. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		if (__predict_false(ti->hftid_tab == NULL)) {
			ti->hftid_tab = malloc(sizeof(*ti->hftid_tab) *
			    ti->ntids, M_CXGBE, M_NOWAIT | M_ZERO);
			if (ti->hftid_tab == NULL) {
				rc = ENOMEM;
				goto done;
			}
			mtx_init(&ti->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
			cv_init(&ti->hftid_cv, "t4hfcv");
		}
		if (__predict_false(sc->tids.atid_tab == NULL)) {
			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use > 0", __func__));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
		cv_init(&ti->ftid_cv, "t4fcv");
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	l2te = NULL;
	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc->l2t);
		if (__predict_false(l2te == NULL))
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
		    t->fs.dmac)) {
			t4_l2t_release(l2te);
			return (ENOMEM);
		}
	}

	if (t->fs.hash)
		return (set_hashfilter(sc, t, l2te));
	else
		return (set_tcamfilter(sc, t, l2te));
}

static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc;

	MPASS(sc->tids.ftid_tab != NULL);
	MPASS(sc->tids.nftids > 0);

	if (t->idx >= sc->tids.nftids)
		return (EINVAL);

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == sc->tids.ftid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);

	return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks for tid tables ensure that the locks that del_* will reach
	 * for are initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_tab != NULL)
			return (del_hashfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
}

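/*
 * Reply to a FW_FILTER_WR for a TCAM filter.  The cookie in the reply carries
 * the disposition (FW_FILTER_WR_FLT_ADDED, etc.) of the set/del operation.
 */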
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, cleanup, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));
	MPASS(is_ftid(sc, tid));

	cleanup = 0;
	idx = tid - sc->tids.ftid_base;
	f = &sc->tids.ftid_tab[idx];
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, idx));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		cleanup = 1;
		break;
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	if (cleanup) {
		f->valid = 0;
		free_filter_resources(f);
		sc->tids.ftids_in_use--;
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		struct filter_entry *f2;

		f->tid = GET_TID(cpl);
		MPASS(f->tid < sc->tids.ntids);
		if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
			/* XXX: avoid hash collisions in the first place. */
			MPASS(f2->tid == f->tid);
			remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
			free_filter_resources(f2);
			f2->valid = 0;
		}
		insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
		free_filter_resources(f);
		f->pending = 0;
	}
done:
	cv_broadcast(&sc->tids.hftid_cv);
	free_atid(sc, atid);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

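/*
 * Reply to the last TCB update sent by configure_hashfilter_tcb.  This is
 * where a hashfilter that needed TCB updates finally becomes valid.
 */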
int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));

	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		f->tid = EIO;	/* hand an errno to the waiting ioctl thread */
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));

	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.ntids;
	struct filter_entry *f;

	if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.hftid_lock);
	for (i = t->idx; i < nfilters; i++) {
		f = lookup_hftid(sc, i);
		if (f != NULL && f->valid) {
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

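/*
 * Build the 64-bit compressed filter tuple for a hashfilter.  Each *_shift in
 * tp_params is the field's bit position within the tuple, or a negative value
 * if the current filter mode does not include that field.
 */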
static uint64_t
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan)
		ntuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0 && fs->mask.iport)
		ntuple |= (uint64_t)fs->val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (fs->val.proto == 0)
			ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
		else
			ntuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos)
		ntuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;

	if (tp->vnic_shift >= 0) {
		if (tp->ingress_config & F_VNIC && fs->mask.pfvf_vld)
			ntuple |= (uint64_t)((fs->val.pfvf_vld << 16) |
			    (fs->val.pf << 13) |
			    (fs->val.vf)) << tp->vnic_shift;
		else if (fs->mask.ovlan_vld)
			ntuple |= (uint64_t)((fs->val.ovlan_vld << 16) |
			    (fs->val.vnic)) << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx)
		ntuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
		ntuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
		ntuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;

	if (tp->frag_shift >= 0 && fs->mask.frag)
		ntuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
		ntuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;

	return (ntuple);
}

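/*
 * A hashfilter is created with the same CPL_ACT_OPEN_REQ[6] used to open an
 * offloaded TCP connection; F_NON_OFFLOAD and F_TCAM_BYPASS in opt0 mark the
 * "connection" as a filter, and CPL_COOKIE_HASHFILTER in the tid steers the
 * CPL_ACT_OPEN_RPL to t4_hashfilter_ao_rpl.
 */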
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) | F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(hashfilter_ntuple(sc, &f->fs)));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    F_T5_OPT_2_VALID | F_RX_CHANNEL |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
		(f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) | F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(hashfilter_ntuple(sc, &f->fs)));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    F_T5_OPT_2_VALID | F_RX_CHANNEL |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
		(f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

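/*
 * Size of the act_open request in 16-byte units, selected by chip generation
 * (the T4/T5/T6 CPLs differ in size) and by IP version.
 */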
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	mtx_lock(&sc->tids.hftid_lock);

	/*
	 * XXX: Check for hash collisions and insert in the hash based lookup
	 * table so that in-flight hashfilters are also considered when checking
	 * for collisions.
	 */

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		if (l2te)
			t4_l2t_release(l2te);
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		if (l2te)
			t4_l2t_release(l2te);
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}

	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		if (l2te)
			t4_l2t_release(l2te);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, wr);
	else
		mk_act_open_req(sc, f, atid, wr);

	f->locked = 1;	/* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);

	/* Wait for the filter to be fully set up, or for the attempt to fail. */
	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;	/* errno from the ithread */
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);

	return (rc);
}

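/*
 * Hashfilter deletion is a single work request carrying three ULP_TX_PKT
 * sub-commands: a SET_TCB_FIELD that points the TCB's RSS_INFO at the queue
 * that should receive the completion, a CPL_ABORT_REQ that tears down the
 * filter's "connection", and the matching CPL_ABORT_RPL.  Each sub-command is
 * padded to a 16-byte multiple with a ULP_TX_SC_NOOP where needed; the mk_*
 * helpers below each return a pointer to where the next sub-command begins.
 */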
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}

	return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}

	return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}

	return (ulpsc);
}

static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();

	MPASS(sc->tids.hftid_tab != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx >= sc->tids.ntids)
		return (EINVAL);

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = EIO;	/* hw still has the filter */
				f->locked = 0;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);

	return (rc);
}

static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);

	return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, 1));
}

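/*
 * Note that set_tcb_tflag sends its updates with no_reply = 1.  The final
 * update issued by configure_hashfilter_tcb passes no_reply = 0, so exactly
 * one CPL_SET_TCB_RPL (tagged CPL_COOKIE_HASHFILTER and handled by
 * t4_hashfilter_tcb_rpl) comes back when the filter is fully programmed.
 */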
/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and
 * the last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}