2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018 Chelsio Communications, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/rwlock.h>
43 #include <sys/socket.h>
45 #include <netinet/in.h>
47 #include "common/common.h"
48 #include "common/t4_msg.h"
49 #include "common/t4_regs.h"
/*
 * Per-filter software state the driver keeps for each entry in the
 * hardware filter region (stored in sc->tids.ftid_tab).
 * NOTE(review): the struct's opening/closing lines are not visible in
 * this chunk of the file.
 */
53 uint32_t valid:1; /* filter allocated and valid */
54 uint32_t locked:1; /* filter is administratively locked */
55 uint32_t pending:1; /* filter action is pending firmware reply */
56 uint32_t smtidx:8; /* Source MAC Table index for smac */
57 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
/* Copy of the user-supplied specification this filter was built from. */
59 struct t4_filter_specification fs;
/*
 * Translate the cached TP filter-tuple configuration (fconf, from
 * TP_VLAN_PRI_MAP) and ingress configuration (iconf) into the driver's
 * T4_FILTER_* capability mode bitmask reported to userland.
 * NOTE(review): some guard and return lines are elided in this chunk;
 * comments describe only what is visible.
 */
63 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
/* The IPv4/IPv6 4-tuple match fields are always available. */
67 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
68 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
/* Each optional TP compression field enables its matching mode bit. */
70 if (fconf & F_FRAGMENTATION)
71 mode |= T4_FILTER_IP_FRAGMENT;
73 if (fconf & F_MPSHITTYPE)
74 mode |= T4_FILTER_MPS_HIT_TYPE;
76 if (fconf & F_MACMATCH)
77 mode |= T4_FILTER_MAC_IDX;
79 if (fconf & F_ETHERTYPE)
80 mode |= T4_FILTER_ETH_TYPE;
82 if (fconf & F_PROTOCOL)
83 mode |= T4_FILTER_IP_PROTO;
/* presumably guarded by F_TOS / F_VLAN checks — elided here. */
86 mode |= T4_FILTER_IP_TOS;
89 mode |= T4_FILTER_VLAN;
91 if (fconf & F_VNIC_ID) {
92 mode |= T4_FILTER_VNIC;
/* presumably conditional on (iconf & F_VNIC) — guard elided here. */
94 mode |= T4_FILTER_IC_VNIC;
/* presumably guarded by F_PORT / F_FCOE checks — elided here. */
98 mode |= T4_FILTER_PORT;
101 mode |= T4_FILTER_FCoE;
/*
 * Inverse of fconf_iconf_to_mode for the TP_VLAN_PRI_MAP part: convert
 * a T4_FILTER_* mode bitmask into the corresponding fconf register bits.
 * NOTE(review): the fconf declaration/return and several assignment
 * lines are elided in this chunk.
 */
107 mode_to_fconf(uint32_t mode)
111 if (mode & T4_FILTER_IP_FRAGMENT)
112 fconf |= F_FRAGMENTATION;
114 if (mode & T4_FILTER_MPS_HIT_TYPE)
115 fconf |= F_MPSHITTYPE;
/* Assignment bodies for the guards below are elided in this chunk. */
117 if (mode & T4_FILTER_MAC_IDX)
120 if (mode & T4_FILTER_ETH_TYPE)
121 fconf |= F_ETHERTYPE;
123 if (mode & T4_FILTER_IP_PROTO)
126 if (mode & T4_FILTER_IP_TOS)
129 if (mode & T4_FILTER_VLAN)
132 if (mode & T4_FILTER_VNIC)
135 if (mode & T4_FILTER_PORT)
138 if (mode & T4_FILTER_FCoE)
/*
 * Convert a T4_FILTER_* mode bitmask into the TP ingress-config bits
 * (only the inner-VNIC selection is relevant here).
 * NOTE(review): the assignment/return lines are elided in this chunk.
 */
145 mode_to_iconf(uint32_t mode)
148 if (mode & T4_FILTER_IC_VNIC)
/*
 * Validate a user-supplied filter specification against the adapter's
 * current filter configuration: build the set of TP_VLAN_PRI_MAP bits
 * (fconf) that the specification requires and verify it is a subset of
 * the configured vlan_pri_map; also verify the ovlan/pfvf match request
 * is consistent with the F_VNIC setting in the cached ingress config.
 * NOTE(review): some assignment/return lines are elided in this chunk.
 */
153 static int check_fspec_against_fconf_iconf(struct adapter *sc,
154 struct t4_filter_specification *fs)
156 struct tp_params *tpp = &sc->params.tp;
/* Each field matched (by value or mask) demands its TP tuple bit. */
159 if (fs->val.frag || fs->mask.frag)
160 fconf |= F_FRAGMENTATION;
162 if (fs->val.matchtype || fs->mask.matchtype)
163 fconf |= F_MPSHITTYPE;
165 if (fs->val.macidx || fs->mask.macidx)
168 if (fs->val.ethtype || fs->mask.ethtype)
169 fconf |= F_ETHERTYPE;
171 if (fs->val.proto || fs->mask.proto)
174 if (fs->val.tos || fs->mask.tos)
177 if (fs->val.vlan_vld || fs->mask.vlan_vld)
/* Outer-VLAN match requires F_VNIC clear in the ingress config ... */
180 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
182 if (tpp->ingress_config & F_VNIC)
/* ... while PF/VF match requires F_VNIC set; the two are exclusive. */
186 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
188 if ((tpp->ingress_config & F_VNIC) == 0)
192 if (fs->val.iport || fs->mask.iport)
195 if (fs->val.fcoe || fs->mask.fcoe)
/* Reject the spec if it needs any tuple bit the hardware lacks. */
198 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
/*
 * Report the current filter mode (T4_FILTER_* bitmask) derived from the
 * driver's cached copies of the TP registers; no hardware read is done.
 */
205 get_filter_mode(struct adapter *sc, uint32_t *mode)
207 struct tp_params *tpp = &sc->params.tp;
210 * We trust the cached values of the relevant TP registers. This means
211 * things work reliably only if writes to those registers are always via
212 * t4_set_filter_mode_.
214 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);
/*
 * Change the adapter's global filter mode.  Refuses to flip the F_VNIC
 * bit in TP_INGRESS_CONFIG on the fly, and (per the visible guards)
 * bails out while filters are in use or the TOM ULD is active, since
 * either would be invalidated by a mode change.
 * NOTE(review): several error-path/return lines are elided in this
 * chunk of the file.
 */
220 set_filter_mode(struct adapter *sc, uint32_t mode)
222 struct tp_params *tpp = &sc->params.tp;
223 uint32_t fconf, iconf;
226 iconf = mode_to_iconf(mode);
227 if ((iconf ^ tpp->ingress_config) & F_VNIC) {
229 * For now we just complain if A_TP_INGRESS_CONFIG is not
230 * already set to the correct value for the requested filter
231 * mode. It's not clear if it's safe to write to this register
232 * on the fly. (And we trust the cached value of the register).
237 fconf = mode_to_fconf(mode);
/* Serialize against other configuration operations on the adapter. */
239 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
/* Mode can't change while filters exist or TOM is using the tuples. */
244 if (sc->tids.ftids_in_use > 0) {
250 if (uld_active(sc, ULD_TOM)) {
/* t4_set_filter_mode returns a positive errno-style code; negate it. */
256 rc = -t4_set_filter_mode(sc, fconf, true);
258 end_synchronized_op(sc, LOCK_HELD);
/*
 * Read a filter's hit counter straight out of its TCB via memory
 * window 0.  One path reads 8 bytes at TCB offset 16, the other 4
 * bytes at offset 24 — presumably a chip-generation branch whose
 * conditional is elided in this chunk; TODO confirm against the full
 * source.
 */
262 static inline uint64_t
263 get_filter_hits(struct adapter *sc, uint32_t fid)
/* TCB address = TCB base register + (absolute filter tid) * TCB size. */
267 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
268 (fid + sc->tids.ftid_base) * TCB_SIZE;
273 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
274 return (be64toh(hits));
278 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
279 return (be32toh(hits));
/*
 * ioctl helper: starting at t->idx, find the next filter entry and copy
 * its details (l2t index, smtidx, hit count) back to the caller.
 * NOTE(review): the loop body's match test and the not-found path are
 * elided in this chunk.
 */
284 get_filter(struct adapter *sc, struct t4_filter *t)
286 int i, rc, nfilters = sc->tids.nftids;
287 struct filter_entry *f;
289 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
/* Nothing to report if no filters exist or the index is out of range. */
294 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
295 t->idx >= nfilters) {
/* Scan forward from the requested index for an allocated entry. */
300 f = &sc->tids.ftid_tab[t->idx];
301 for (i = t->idx; i < nfilters; i++, f++) {
304 t->l2tidx = f->l2t ? f->l2t->idx : 0;
305 t->smtidx = f->smtidx;
/* Hit count only when available; UINT64_MAX signals "no count". */
307 t->hits = get_filter_hits(sc, t->idx);
309 t->hits = UINT64_MAX;
318 end_synchronized_op(sc, LOCK_HELD);
/*
 * Build and send a FW_FILTER_WR work request to the firmware to program
 * filter fidx into the hardware.  Allocates an L2T switching entry if
 * the filter rewrites the dmac or vlan.  The reply arrives
 * asynchronously and is handled by t4_filter_rpl().
 * NOTE(review): several error-path and declaration lines are elided in
 * this chunk; comments describe only what is visible.
 */
323 set_filter_wr(struct adapter *sc, int fidx)
325 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
326 struct fw_filter_wr *fwr;
327 unsigned int ftid, vnic_vld, vnic_vld_mask;
328 struct wrq_cookie cookie;
330 ASSERT_SYNCHRONIZED_OP(sc);
332 if (f->fs.newdmac || f->fs.newvlan) {
333 /* This filter needs an L2T entry; allocate one. */
334 f->l2t = t4_l2t_alloc_switching(sc->l2t);
337 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
/* L2T programming failed: give the entry back. */
339 t4_l2t_release(f->l2t);
345 /* Already validated against fconf, iconf */
/* pfvf_vld and ovlan_vld share the VNIC field, so they're exclusive. */
346 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
347 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
348 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
352 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
/* Absolute tid for this filter = base of filter region + index. */
357 ftid = sc->tids.ftid_base + fidx;
/* Reserve space for the work request on the management queue. */
359 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
362 bzero(fwr, sizeof(*fwr));
/* Fill in the firmware work request, all fields big-endian. */
364 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
365 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
367 htobe32(V_FW_FILTER_WR_TID(ftid) |
368 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
369 V_FW_FILTER_WR_NOREPLY(0) |
370 V_FW_FILTER_WR_IQ(f->fs.iq));
371 fwr->del_filter_to_l2tix =
372 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
373 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
374 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
375 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
376 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
377 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
378 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
379 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
/* VLAN rewrite is expressed as remove + insert. */
380 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
381 f->fs.newvlan == VLAN_REWRITE) |
382 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
383 f->fs.newvlan == VLAN_REWRITE) |
384 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
385 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
386 V_FW_FILTER_WR_PRIO(f->fs.prio) |
387 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
/* Match values and masks for each tuple field. */
388 fwr->ethtype = htobe16(f->fs.val.ethtype);
389 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
390 fwr->frag_to_ovlan_vldm =
391 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
392 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
393 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
394 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
395 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
396 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
/* Replies for this filter are steered to the firmware event queue. */
398 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
399 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
400 fwr->maci_to_matchtypem =
401 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
402 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
403 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
404 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
405 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
406 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
407 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
408 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
409 fwr->ptcl = f->fs.val.proto;
410 fwr->ptclm = f->fs.mask.proto;
411 fwr->ttyp = f->fs.val.tos;
412 fwr->ttypm = f->fs.mask.tos;
413 fwr->ivlan = htobe16(f->fs.val.vlan);
414 fwr->ivlanm = htobe16(f->fs.mask.vlan);
/* The "ovlan" WR field carries the VNIC (outer VLAN or PF/VF) value. */
415 fwr->ovlan = htobe16(f->fs.val.vnic);
416 fwr->ovlanm = htobe16(f->fs.mask.vnic);
/* lip/lp = local (destination) side; fip/fp = foreign (source) side. */
417 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
418 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
419 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
420 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
421 fwr->lp = htobe16(f->fs.val.dport);
422 fwr->lpm = htobe16(f->fs.mask.dport);
423 fwr->fp = htobe16(f->fs.val.sport);
424 fwr->fpm = htobe16(f->fs.mask.sport);
426 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
/* Count the filter as in use before the WR is committed. */
429 sc->tids.ftids_in_use++;
431 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
/*
 * ioctl entry point to create a filter.  Validates the request against
 * the adapter's filter mode and limits, lazily allocates the filter
 * table, sends the work request via set_filter_wr(), and then sleeps on
 * the filter table until t4_filter_rpl() reports the outcome.
 * NOTE(review): several error-path/return lines are elided in this
 * chunk; comments describe only what is visible.
 */
436 set_filter(struct adapter *sc, struct t4_filter *t)
438 unsigned int nfilters, nports;
439 struct filter_entry *f;
442 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
446 nfilters = sc->tids.nftids;
447 nports = sc->params.nports;
454 if (t->idx >= nfilters) {
459 /* Validate against the global filter mode and ingress config */
460 rc = check_fspec_against_fconf_iconf(sc, &t->fs);
/* A switch filter must egress via a real port. */
464 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
469 if (t->fs.val.iport >= nports) {
474 /* Can't specify an iq if not steering to it */
475 if (!t->fs.dirsteer && t->fs.iq) {
480 /* IPv6 filter idx must be 4 aligned */
/* fs.type == 1 means IPv6: it consumes 4 consecutive filter slots. */
481 if (t->fs.type == 1 &&
482 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
/* Bring the adapter fully up if this is the first real use. */
487 if (!(sc->flags & FULL_INIT_DONE) &&
488 ((rc = adapter_full_init(sc)) != 0))
/* Lazily allocate the software filter table and its lock. */
491 if (sc->tids.ftid_tab == NULL) {
492 KASSERT(sc->tids.ftids_in_use == 0,
493 ("%s: no memory allocated but filters_in_use > 0",
496 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
497 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
498 if (sc->tids.ftid_tab == NULL) {
502 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
/* All (up to 4, for IPv6) slots this filter spans must be free. */
505 for (i = 0; i < 4; i++) {
506 f = &sc->tids.ftid_tab[t->idx + i];
508 if (f->pending || f->valid) {
521 f = &sc->tids.ftid_tab[t->idx];
524 rc = set_filter_wr(sc, t->idx);
526 end_synchronized_op(sc, 0);
/* Wait (interruptibly) for the firmware reply to clear f->pending. */
529 mtx_lock(&sc->tids.ftid_lock);
531 if (f->pending == 0) {
532 rc = f->valid ? 0 : EIO;
536 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
537 PCATCH, "t4setfw", 0)) {
542 mtx_unlock(&sc->tids.ftid_lock);
/*
 * Build and send a filter-delete work request for filter fidx on the
 * management queue; the firmware reply is routed to the firmware event
 * queue and handled by t4_filter_rpl().
 * NOTE(review): declarations and error-path lines are elided in this
 * chunk.
 */
548 del_filter_wr(struct adapter *sc, int fidx)
550 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
551 struct fw_filter_wr *fwr;
553 struct wrq_cookie cookie;
/* Absolute tid = base of the filter region + index. */
555 ftid = sc->tids.ftid_base + fidx;
557 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
560 bzero(fwr, sizeof (*fwr));
/* Common code fills in the delete WR; replies go to the fw queue. */
562 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
565 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
/*
 * ioctl entry point to delete a filter.  Validates the index, sends the
 * delete work request, then sleeps on the filter table until
 * t4_filter_rpl() confirms removal (mirror image of set_filter()).
 * NOTE(review): several validation/error lines are elided in this
 * chunk.
 */
570 del_filter(struct adapter *sc, struct t4_filter *t)
572 unsigned int nfilters;
573 struct filter_entry *f;
576 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
580 nfilters = sc->tids.nftids;
/* Nothing to delete if no table, no filters in use, or bad index. */
587 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
588 t->idx >= nfilters) {
593 if (!(sc->flags & FULL_INIT_DONE)) {
598 f = &sc->tids.ftid_tab[t->idx];
610 t->fs = f->fs; /* extra info for the caller */
611 rc = del_filter_wr(sc, t->idx);
615 end_synchronized_op(sc, 0);
/* Wait (interruptibly) for the firmware reply to clear f->pending. */
618 mtx_lock(&sc->tids.ftid_lock);
620 if (f->pending == 0) {
/* After a delete, a still-valid filter means the delete failed. */
621 rc = f->valid ? EIO : 0;
625 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
626 PCATCH, "t4delfw", 0)) {
631 mtx_unlock(&sc->tids.ftid_lock);
/*
 * Reset a software filter entry: release its L2T entry (visible call is
 * presumably guarded by an f->l2t check elided in this chunk) and zero
 * the whole structure.
 */
638 clear_filter(struct filter_entry *f)
641 t4_l2t_release(f->l2t);
643 bzero(f, sizeof (*f));
/*
 * CPL handler for the firmware's reply to a filter work request.
 * Translates the reply tid into a filter index, updates the entry's
 * valid/pending state under the filter lock, and wakes any thread
 * sleeping in set_filter()/del_filter().
 * NOTE(review): some branch bodies are elided in this chunk; comments
 * describe only what is visible.
 */
647 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
649 struct adapter *sc = iq->adapter;
650 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
651 unsigned int idx = GET_TID(rpl);
653 struct filter_entry *f;
/* This CPL carries no payload and arrives only on the fw event queue. */
655 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
657 MPASS(iq == &sc->sge.fwq);
658 MPASS(is_ftid(sc, idx));
/* Convert the absolute tid into an index into ftid_tab. */
660 idx -= sc->tids.ftid_base;
661 f = &sc->tids.ftid_tab[idx];
/* The cookie encodes the firmware's result code for the request. */
662 rc = G_COOKIE(rpl->cookie);
664 mtx_lock(&sc->tids.ftid_lock);
665 if (rc == FW_FILTER_WR_FLT_ADDED) {
666 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
/* The SMT index assigned by hardware is in bits 24-31 of oldval. */
668 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
669 f->pending = 0; /* asynchronous setup completed */
672 if (rc != FW_FILTER_WR_FLT_DELETED) {
673 /* Add or delete failed, display an error */
675 "filter %u setup failed with error %u\n",
/* Successful delete: one fewer filter in use. */
680 sc->tids.ftids_in_use--;
/* Wake whoever is sleeping in set_filter()/del_filter(). */
682 wakeup(&sc->tids.ftid_tab);
683 mtx_unlock(&sc->tids.ftid_lock);