2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018 Chelsio Communications, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/rwlock.h>
43 #include <sys/socket.h>
45 #include <netinet/in.h>
47 #include "common/common.h"
48 #include "common/t4_msg.h"
49 #include "common/t4_regs.h"
50 #include "common/t4_regs_values.h"
51 #include "common/t4_tcb.h"
56 uint32_t valid:1; /* filter allocated and valid */
57 uint32_t locked:1; /* filter is administratively locked or busy */
58 uint32_t pending:1; /* filter action is pending firmware reply */
59 int tid; /* tid of the filter TCB */
60 struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */
61 struct smt_entry *smt; /* SMT entry for SMAC rewrite */
63 struct t4_filter_specification fs;
66 static void free_filter_resources(struct filter_entry *);
67 static int get_tcamfilter(struct adapter *, struct t4_filter *);
68 static int get_hashfilter(struct adapter *, struct t4_filter *);
69 static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
70 struct l2t_entry *, struct smt_entry *);
71 static int del_hashfilter(struct adapter *, struct t4_filter *);
72 static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);
/*
 * Returns true when the chip keeps high-priority filters in a region
 * separate from the normal TCAM filter region (T6 and later).
 */
75 separate_hpfilter_region(struct adapter *sc)
78 	return (chip_id(sc) >= CHELSIO_T6);
/*
 * Allocate the hashfilter tid lookup table (one pointer slot per tid in
 * sc->tids) and initialize the mutex/condvar that protect it.
 * 'flags' is passed through to malloc (M_NOWAIT/M_WAITOK style).
 */
82 alloc_hftid_tab(struct tid_info *t, int flags)
86 	MPASS(t->hftid_tab == NULL);
88 	t->hftid_tab = malloc(sizeof(*t->hftid_tab) * t->ntids, M_CXGBE,
90 	if (t->hftid_tab == NULL)
92 	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
93 	cv_init(&t->hftid_cv, "t4hfcv");
/*
 * Tear down the hashfilter tid table: free any filter entries still
 * installed, then the table itself, and destroy the lock/condvar if
 * they were initialized.  Safe to call on a partially set up tid_info.
 */
99 free_hftid_tab(struct tid_info *t)
103 	if (t->hftid_tab != NULL) {
/* Stop scanning early once all in-use tids have been accounted for. */
105 		for (i = 0; t->tids_in_use > 0 && i < t->ntids; i++) {
106 			if (t->hftid_tab[i] == NULL)
108 			free(t->hftid_tab[i], M_CXGBE);
111 		free(t->hftid_tab, M_CXGBE);
115 	if (mtx_initialized(&t->hftid_lock)) {
116 		mtx_destroy(&t->hftid_lock);
117 		cv_destroy(&t->hftid_cv);
/*
 * Record 'ctx' (a filter_entry) against 'tid' in the hashfilter table and
 * account for the tids consumed (ntids is >1 for IPv6 filters).
 */
122 insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
124 	struct tid_info *t = &sc->tids;
126 	t->hftid_tab[tid] = ctx;
127 	atomic_add_int(&t->tids_in_use, ntids);
/* Return the context stored for 'tid' (NULL if no hashfilter there). */
131 lookup_hftid(struct adapter *sc, int tid)
133 	struct tid_info *t = &sc->tids;
135 	return (t->hftid_tab[tid]);
/* Clear the table slot for 'tid' and release the tids it consumed. */
139 remove_hftid(struct adapter *sc, int tid, int ntids)
141 	struct tid_info *t = &sc->tids;
143 	t->hftid_tab[tid] = NULL;
144 	atomic_subtract_int(&t->tids_in_use, ntids);
/*
 * Translate a T4_FILTER_* mode bitmap (ioctl interface) into the
 * hardware's compressed filter tuple configuration (fconf) bits.
 */
148 mode_to_fconf(uint32_t mode)
152 	if (mode & T4_FILTER_IP_FRAGMENT)
153 		fconf |= F_FRAGMENTATION;
155 	if (mode & T4_FILTER_MPS_HIT_TYPE)
156 		fconf |= F_MPSHITTYPE;
158 	if (mode & T4_FILTER_MAC_IDX)
161 	if (mode & T4_FILTER_ETH_TYPE)
162 		fconf |= F_ETHERTYPE;
164 	if (mode & T4_FILTER_IP_PROTO)
167 	if (mode & T4_FILTER_IP_TOS)
170 	if (mode & T4_FILTER_VLAN)
173 	if (mode & T4_FILTER_VNIC)
176 	if (mode & T4_FILTER_PORT)
179 	if (mode & T4_FILTER_FCoE)
/*
 * Translate the mode bitmap into the ingress-config value (only the
 * VNIC/outer-VLAN selection bit is derived here).
 */
186 mode_to_iconf(uint32_t mode)
189 	if (mode & T4_FILTER_IC_VNIC)
/*
 * Verify that a filter specification only matches on fields that the
 * adapter's current filter mode (vlan_pri_map) and ingress config can
 * actually support.  Builds the fconf the spec would need and compares
 * it against the cached hardware configuration.
 */
195 check_fspec_against_fconf_iconf(struct adapter *sc,
196     struct t4_filter_specification *fs)
198 	struct tp_params *tpp = &sc->params.tp;
201 	if (fs->val.frag || fs->mask.frag)
202 		fconf |= F_FRAGMENTATION;
204 	if (fs->val.matchtype || fs->mask.matchtype)
205 		fconf |= F_MPSHITTYPE;
207 	if (fs->val.macidx || fs->mask.macidx)
210 	if (fs->val.ethtype || fs->mask.ethtype)
211 		fconf |= F_ETHERTYPE;
213 	if (fs->val.proto || fs->mask.proto)
216 	if (fs->val.tos || fs->mask.tos)
219 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
/*
 * The VNIC_ID slot is shared: it carries outer-VLAN info when F_VNIC is
 * clear in the ingress config, and PF/VF info when F_VNIC is set.  The
 * spec must agree with the current setting.
 */
222 	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
224 		if (tpp->ingress_config & F_VNIC)
228 	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
230 		if ((tpp->ingress_config & F_VNIC) == 0)
234 	if (fs->val.iport || fs->mask.iport)
237 	if (fs->val.fcoe || fs->mask.fcoe)
/* Every bit the spec needs must already be in the hardware's fconf. */
240 	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
/*
 * Report the current filter mode as a T4_FILTER_* bitmap.  A non-zero
 * incoming *mode requests hashfilter semantics, in which case each field
 * is reported only if it is fully covered by the hash filter mask.
 */
247 get_filter_mode(struct adapter *sc, uint32_t *mode)
249 	struct tp_params *tp = &sc->params.tp;
252 	/* Non-zero incoming value in mode means "hashfilter mode". */
253 	mask = *mode ? tp->hash_filter_mask : UINT64_MAX;
/* These fields are always available regardless of the compressed tuple. */
256 	*mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
257 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
/*
 * Report a compressed-tuple field only when it is enabled in
 * vlan_pri_map AND (for hashfilter mode) its full width is present in
 * the hash filter mask.
 */
259 #define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit) do { \
260 	if (tp->vlan_pri_map & (fconf_bit)) { \
261 		MPASS(tp->field_shift >= 0); \
262 		if ((mask >> tp->field_shift & field_mask) == field_mask) \
263 			*mode |= (mode_bit); \
267 	CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
268 	CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
269 	CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
270 	CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
271 	CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
272 	CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
273 	CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
274 	CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID , T4_FILTER_VNIC);
275 	if (tp->ingress_config & F_VNIC)
276 		*mode |= T4_FILTER_IC_VNIC;
277 	CHECK_FIELD(F_PORT, port_shift, M_FT_PORT , T4_FILTER_PORT);
278 	CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE , T4_FILTER_FCoE);
/*
 * Change the global filter mode.  Refuses if the requested ingress
 * config differs from the cached one, if any TCAM filters are in use,
 * or if TOE is active; otherwise programs the new fconf via
 * t4_set_filter_mode under a synchronized op.
 */
285 set_filter_mode(struct adapter *sc, uint32_t mode)
287 	struct tp_params *tpp = &sc->params.tp;
288 	uint32_t fconf, iconf;
291 	iconf = mode_to_iconf(mode);
292 	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
294 		 * For now we just complain if A_TP_INGRESS_CONFIG is not
295 		 * already set to the correct value for the requested filter
296 		 * mode.  It's not clear if it's safe to write to this register
297 		 * on the fly.  (And we trust the cached value of the register).
299 		 * check_fspec_against_fconf_iconf and other code that looks at
300 		 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
301 		 * thorougly before allowing dynamic filter mode changes.
306 	fconf = mode_to_fconf(mode);
308 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
/* Can't change the mode while filters that depend on it exist. */
313 	if (sc->tids.ftids_in_use > 0 || sc->tids.hpftids_in_use > 0) {
319 	if (uld_active(sc, ULD_TOM)) {
325 	rc = -t4_set_filter_mode(sc, fconf, true);
327 	end_synchronized_op(sc, LOCK_HELD);
/*
 * Read a filter's hit count straight out of its TCB via a memory window.
 * The counter's location/width within the TCB differs by chip, hence the
 * two read paths (64-bit at offset 16 vs 32-bit at offset 24).
 */
331 static inline uint64_t
332 get_filter_hits(struct adapter *sc, uint32_t tid)
336 	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
341 		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
342 		return (be64toh(hits));
346 		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
347 		return (be32toh(hits));
/* Dispatch a get-filter request to the hashfilter or TCAM implementation. */
352 get_filter(struct adapter *sc, struct t4_filter *t)
355 		return (get_hashfilter(sc, t));
357 	return (get_tcamfilter(sc, t));
/*
 * Install a TCAM filter at the index in 't' by building a FW_FILTER_WR
 * (or FW_FILTER2_WR when the firmware supports it) and sending it on
 * control queue 0, then sleeping until the firmware reply
 * (see t4_filter_rpl) marks the entry valid or failed.
 *
 * Ownership of 'l2te'/'smt' passes to the filter entry on success; they
 * are released here on the failure paths that never send the WR.
 * Returns 0 on success, EBUSY if the slot is pending/valid, EPERM if
 * locked, EIO if the firmware rejected the filter.
 */
361 set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
362     struct smt_entry *smt)
364 	struct filter_entry *f;
365 	struct fw_filter2_wr *fwr;
366 	u_int vnic_vld, vnic_vld_mask;
367 	struct wrq_cookie cookie;
368 	int i, rc, busy, locked;
/* An IPv6 filter occupies four consecutive TCAM slots. */
370 	const int ntids = t->fs.type ? 4 : 1;
373 	/* Already validated against fconf, iconf */
374 	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
375 	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
/* Pick the high-priority or normal region based on chip and priority. */
377 	if (separate_hpfilter_region(sc) && t->fs.prio) {
378 		MPASS(t->idx < sc->tids.nhpftids);
379 		f = &sc->tids.hpftid_tab[t->idx];
380 		tid = sc->tids.hpftid_base + t->idx;
382 		MPASS(t->idx < sc->tids.nftids);
383 		f = &sc->tids.ftid_tab[t->idx];
384 		tid = sc->tids.ftid_base + t->idx;
/* Every slot the filter would occupy must be free and unlocked. */
386 	rc = busy = locked = 0;
387 	mtx_lock(&sc->tids.ftid_lock);
388 	for (i = 0; i < ntids; i++) {
389 		busy += f[i].pending + f[i].valid;
390 		locked += f[i].locked;
399 	if (sc->params.filter2_wr_support)
400 		len16 = howmany(sizeof(struct fw_filter2_wr), 16);
402 		len16 = howmany(sizeof(struct fw_filter_wr), 16);
403 	fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
404 	if (__predict_false(fwr == NULL))
408 		if (separate_hpfilter_region(sc) && t->fs.prio)
409 			sc->tids.hpftids_in_use++;
411 			sc->tids.ftids_in_use++;
414 	mtx_unlock(&sc->tids.ftid_lock);
417 		t4_l2t_release(l2te);
424 	 * Can't fail now.  A set-filter WR will definitely be sent.
/* The VNIC_ID field carries either PF/VF or outer-VLAN validity. */
432 	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
436 	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
441 	bzero(fwr, sizeof(*fwr));
442 	if (sc->params.filter2_wr_support)
443 		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
445 		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
446 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
448 	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
449 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
450 		V_FW_FILTER_WR_NOREPLY(0) |
451 		V_FW_FILTER_WR_IQ(f->fs.iq));
452 	fwr->del_filter_to_l2tix =
453 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
454 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
455 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
456 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
457 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
458 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
459 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
460 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
461 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
462 		    f->fs.newvlan == VLAN_REWRITE) |
463 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
464 		    f->fs.newvlan == VLAN_REWRITE) |
465 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
466 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
467 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
468 		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
469 	fwr->ethtype = htobe16(f->fs.val.ethtype);
470 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
471 	fwr->frag_to_ovlan_vldm =
472 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
473 	    V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
474 	    V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
475 	    V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
476 	    V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
477 	    V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
/* Firmware replies to the fwq; t4_filter_rpl handles the CPL. */
479 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
480 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
481 	fwr->maci_to_matchtypem =
482 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
483 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
484 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
485 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
486 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
487 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
488 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
489 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
490 	fwr->ptcl = f->fs.val.proto;
491 	fwr->ptclm = f->fs.mask.proto;
492 	fwr->ttyp = f->fs.val.tos;
493 	fwr->ttypm = f->fs.mask.tos;
494 	fwr->ivlan = htobe16(f->fs.val.vlan);
495 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
496 	fwr->ovlan = htobe16(f->fs.val.vnic);
497 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
498 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
499 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
500 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
501 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
502 	fwr->lp = htobe16(f->fs.val.dport);
503 	fwr->lpm = htobe16(f->fs.mask.dport);
504 	fwr->fp = htobe16(f->fs.val.sport);
505 	fwr->fpm = htobe16(f->fs.mask.sport);
506 	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
507 	bzero(fwr->sma, sizeof (fwr->sma));
/* FW_FILTER2_WR extensions: swapmac and NAT rewrite parameters. */
508 	if (sc->params.filter2_wr_support) {
509 		fwr->filter_type_swapmac =
510 		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
511 		fwr->natmode_to_ulp_type =
512 		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
513 			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
514 		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
515 		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
516 		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
517 		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
518 		fwr->newlport = htobe16(f->fs.nat_dport);
519 		fwr->newfport = htobe16(f->fs.nat_sport);
520 		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
522 	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
524 	/* Wait for response. */
525 	mtx_lock(&sc->tids.ftid_lock);
527 		if (f->pending == 0) {
528 			rc = f->valid ? 0 : EIO;
/* Interruptible sleep: a signal abandons the wait but not the WR. */
531 		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
536 	mtx_unlock(&sc->tids.ftid_lock);
/*
 * Build the compressed filter tuple (*ftuple) for a hashfilter from the
 * spec's masked fields, using the per-field shifts in tp_params.  Also
 * accumulates the mask actually used (fmask) and fails if it does not
 * exactly match the adapter's configured hash filter mask — hashfilters
 * must conform to the global filterMask.
 */
541 hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
544 	struct tp_params *tp = &sc->params.tp;
550 	 * Initialize each of the fields which we care about which are present
551 	 * in the Compressed Filter Tuple.
553 	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
554 		*ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
555 		fmask |= M_FT_VLAN << tp->vlan_shift;
558 	if (tp->port_shift >= 0 && fs->mask.iport) {
559 		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
560 		fmask |= M_FT_PORT << tp->port_shift;
563 	if (tp->protocol_shift >= 0 && fs->mask.proto) {
564 		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
565 		fmask |= M_FT_PROTOCOL << tp->protocol_shift;
568 	if (tp->tos_shift >= 0 && fs->mask.tos) {
569 		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
570 		fmask |= M_FT_TOS << tp->tos_shift;
573 	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
574 		/* F_VNIC in ingress config was already validated. */
575 		if (tp->ingress_config & F_VNIC)
576 			MPASS(fs->mask.pfvf_vld);
578 			MPASS(fs->mask.ovlan_vld);
/* Bit 16 is the VNIC_ID valid bit within the tuple field. */
580 		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
581 		fmask |= M_FT_VNIC_ID << tp->vnic_shift;
584 	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
585 		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
586 		fmask |= M_FT_MACMATCH << tp->macmatch_shift;
589 	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
590 		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
591 		fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
594 	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
595 		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
596 		fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
599 	if (tp->frag_shift >= 0 && fs->mask.frag) {
600 		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
601 		fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
604 	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
605 		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
606 		fmask |= M_FT_FCOE << tp->fcoe_shift;
609 	/* A hashfilter must conform to the filterMask. */
610 	if (fmask != tp->hash_filter_mask)
/*
 * ioctl entry point for creating a filter.  Validates the request
 * against chip capabilities and the configured filter mode, lazily
 * allocates the relevant tid tables under a synchronized op, acquires
 * any L2T/SMT switching entries the filter needs, and then hands off to
 * set_hashfilter() or set_tcamfilter() (which take ownership of those
 * entries).
 */
617 set_filter(struct adapter *sc, struct t4_filter *t)
619 	struct tid_info *ti = &sc->tids;
620 	struct l2t_entry *l2te;
621 	struct smt_entry *smt;
626 	 * Basic filter checks first.
630 		if (!is_hashfilter(sc) || ti->ntids == 0)
632 		/* Hardware, not user, selects a tid for hashfilters. */
633 		if (t->idx != (uint32_t)-1)
635 		/* T5 can't count hashfilter hits. */
636 		if (is_t5(sc) && t->fs.hitcnts)
638 		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
642 		if (separate_hpfilter_region(sc) && t->fs.prio) {
643 			if (ti->nhpftids == 0)
645 			if (t->idx >= ti->nhpftids)
650 			if (t->idx >= ti->nftids)
653 		/* IPv6 filter idx must be 4 aligned */
654 		if (t->fs.type == 1 &&
655 		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
659 	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
660 	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
661 	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
662 	    t->fs.swapmac || t->fs.nat_mode))
665 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
667 	if (t->fs.val.iport >= sc->params.nports)
670 	/* Can't specify an iq if not steering to it */
671 	if (!t->fs.dirsteer && t->fs.iq)
674 	/* Validate against the global filter mode and ingress config */
675 	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
680 	 * Basic checks passed.  Make sure the queues and tid tables are setup.
683 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
686 	if (!(sc->flags & FULL_INIT_DONE) &&
687 	    ((rc = adapter_full_init(sc)) != 0)) {
688 		end_synchronized_op(sc, 0);
/* Hashfilters need the hftid table and the atid table for open replies. */
692 		if (__predict_false(ti->hftid_tab == NULL)) {
693 			rc = alloc_hftid_tab(&sc->tids, M_NOWAIT);
697 		if (__predict_false(sc->tids.atid_tab == NULL)) {
698 			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
702 	} else if (separate_hpfilter_region(sc) && t->fs.prio &&
703 	    __predict_false(ti->hpftid_tab == NULL)) {
704 		MPASS(ti->nhpftids != 0);
705 		KASSERT(ti->hpftids_in_use == 0,
706 		    ("%s: no memory allocated but hpftids_in_use is %u",
707 		    __func__, ti->hpftids_in_use));
708 		ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
709 		    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
710 		if (ti->hpftid_tab == NULL) {
/* ftid_lock/ftid_cv are shared by the TCAM and hpf regions. */
714 		if (!mtx_initialized(&sc->tids.ftid_lock)) {
715 			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
716 			cv_init(&ti->ftid_cv, "t4fcv");
718 	} else if (__predict_false(ti->ftid_tab == NULL)) {
719 		MPASS(ti->nftids != 0);
720 		KASSERT(ti->ftids_in_use == 0,
721 		    ("%s: no memory allocated but ftids_in_use is %u",
722 		    __func__, ti->ftids_in_use));
723 		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
724 		    M_CXGBE, M_NOWAIT | M_ZERO);
725 		if (ti->ftid_tab == NULL) {
729 		if (!mtx_initialized(&sc->tids.ftid_lock)) {
730 			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
731 			cv_init(&ti->ftid_cv, "t4fcv");
735 	end_synchronized_op(sc, 0);
740 	 * Allocate L2T entry, SMT entry, etc.
744 	if (t->fs.newdmac || t->fs.newvlan) {
745 		/* This filter needs an L2T entry; allocate one. */
746 		l2te = t4_l2t_alloc_switching(sc->l2t);
747 		if (__predict_false(l2te == NULL))
749 		rc = t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
752 			t4_l2t_release(l2te);
759 		/* This filter needs an SMT entry; allocate one. */
760 		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
761 		if (__predict_false(smt == NULL)) {
763 				t4_l2t_release(l2te);
766 		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
770 				t4_l2t_release(l2te);
776 		return (set_hashfilter(sc, t, ftuple, l2te, smt));
778 		return (set_tcamfilter(sc, t, l2te, smt));
/*
 * Delete a TCAM filter: send a delete work request (t4_mk_filtdelwr) on
 * control queue 0 and sleep on ftid_cv until the firmware reply clears
 * the entry.  The filter's spec is copied back to 't' for the caller.
 */
783 del_tcamfilter(struct adapter *sc, struct t4_filter *t)
785 	struct filter_entry *f;
786 	struct fw_filter_wr *fwr;
787 	struct wrq_cookie cookie;
793 	mtx_lock(&sc->tids.ftid_lock);
/* Select the high-priority or normal filter region. */
794 	if (separate_hpfilter_region(sc) && t->fs.prio) {
795 		nfilters = sc->tids.nhpftids;
796 		f = sc->tids.hpftid_tab;
798 		tid_base = sc->tids.hpftid_base;
801 		nfilters = sc->tids.nftids;
802 		f = sc->tids.ftid_tab;
804 		tid_base = sc->tids.ftid_base;
807 	MPASS(f != NULL);	/* Caller checked this. */
808 	if (t->idx >= nfilters) {
826 	MPASS(f->tid == tid_base + t->idx);
827 	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
833 	bzero(fwr, sizeof (*fwr));
834 	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
836 	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
837 	t->fs = f->fs;	/* extra info for the caller */
/* Deletion succeeded iff the entry is no longer valid. */
840 		if (f->pending == 0) {
841 			rc = f->valid ? EIO : 0;
844 		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
850 	mtx_unlock(&sc->tids.ftid_lock);
/*
 * ioctl entry point for deleting a filter.  Dispatches to the hashfilter
 * or TCAM deletion path; a NULL tid table means no filter of that kind
 * can exist (and its locks may be uninitialized), so bail out early.
 */
855 del_filter(struct adapter *sc, struct t4_filter *t)
858 	/* No filters possible if not initialized yet. */
859 	if (!(sc->flags & FULL_INIT_DONE))
863 	 * The checks for tid tables ensure that the locks that del_* will reach
864 	 * for are initialized.
867 		if (sc->tids.hftid_tab != NULL)
868 			return (del_hashfilter(sc, t));
869 	} else if (separate_hpfilter_region(sc) && t->fs.prio) {
870 		if (sc->tids.hpftid_tab != NULL)
871 			return (del_tcamfilter(sc, t));
873 		if (sc->tids.ftid_tab != NULL)
874 			return (del_tcamfilter(sc, t));
881  * Release secondary resources associated with the filter.
/* Releases the filter's L2T and SMT switching entries, if any. */
884 free_filter_resources(struct filter_entry *f)
888 		t4_l2t_release(f->l2te);
892 		t4_smt_release(f->smt);
/*
 * Send a CPL_SET_TCB_FIELD on control queue 0 to update (mask, val) of
 * 'word' in tid's TCB.  When a reply is requested it is directed to the
 * fwq with CPL_COOKIE_HASHFILTER so it is routed back to this module.
 */
898 set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
899     uint64_t val, int no_reply)
901 	struct wrq_cookie cookie;
902 	struct cpl_set_tcb_field *req;
904 	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
907 	bzero(req, sizeof(*req));
908 	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
910 		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
913 		req->reply_ctrl = htobe16(V_NO_REPLY(1));
914 	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
915 	req->mask = htobe64(mask);
916 	req->val = htobe64(val);
917 	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
922 /* Set one of the t_flags bits in the TCB. */
/* Thin wrapper over set_tcb_field for a single bit of W_TCB_T_FLAGS. */
924 set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
928 	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
929 	    (uint64_t)val << bit_pos, no_reply));
/*
 * Handler for the firmware's reply to a TCAM filter WR (set or delete).
 * Locates the filter_entry from the tid, updates its state according to
 * the cookie carried in the reply, and wakes any thread sleeping in
 * set_tcamfilter/del_tcamfilter via ftid_cv.  Runs in ithread context.
 */
933 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
935 	struct adapter *sc = iq->adapter;
936 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
937 	u_int tid = GET_TID(rpl);
939 	struct filter_entry *f;
941 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
/* Map the tid back to the hp or normal filter table. */
945 	if (is_hpftid(sc, tid)) {
946 		idx = tid - sc->tids.hpftid_base;
947 		f = &sc->tids.hpftid_tab[idx];
948 	} else if (is_ftid(sc, tid)) {
949 		idx = tid - sc->tids.ftid_base;
950 		f = &sc->tids.ftid_tab[idx];
952 		panic("%s: FW reply for invalid TID %d.", __func__, tid);
954 	MPASS(f->tid == tid);
955 	rc = G_COOKIE(rpl->cookie);
957 	mtx_lock(&sc->tids.ftid_lock);
958 	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
961 	case FW_FILTER_WR_FLT_ADDED:
962 		/* set-filter succeeded */
/* SMAC rewrite needs follow-up TCB updates after the filter is added. */
965 			MPASS(f->smt != NULL);
966 			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
967 			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
968 			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
969 			    V_TCB_SMAC_SEL(f->smt->idx), 1);
970 			/* XXX: wait for reply to TCB update before !pending */
973 	case FW_FILTER_WR_FLT_DELETED:
974 		/* del-filter succeeded */
975 		MPASS(f->valid == 1);
978 	case FW_FILTER_WR_SMT_TBL_FULL:
979 		/* set-filter failed due to lack of SMT space. */
980 		MPASS(f->valid == 0);
981 		free_filter_resources(f);
982 		if (separate_hpfilter_region(sc) && f->fs.prio)
983 			sc->tids.hpftids_in_use--;
985 			sc->tids.ftids_in_use--;
987 	case FW_FILTER_WR_SUCCESS:
988 	case FW_FILTER_WR_EINVAL:
990 		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
/* Wake whichever ioctl thread is waiting on this filter. */
994 	cv_broadcast(&sc->tids.ftid_cv);
995 	mtx_unlock(&sc->tids.ftid_lock);
1001  * This is the reply to the Active Open that created the filter. Additional TCB
1002  * updates may be required to complete the filter configuration.
1005 t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
1008 	struct adapter *sc = iq->adapter;
1009 	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
1010 	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
1011 	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
/* The atid was allocated in set_hashfilter and maps back to the entry. */
1012 	struct filter_entry *f = lookup_atid(sc, atid);
1014 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1016 	mtx_lock(&sc->tids.hftid_lock);
1017 	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
1018 	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
1019 	    __func__, f, f->tid));
1020 	if (status == CPL_ERR_NONE) {
1021 		struct filter_entry *f2;
1023 		f->tid = GET_TID(cpl);
1024 		MPASS(f->tid < sc->tids.ntids);
/* The new filter displaces any existing filter at the same tid. */
1025 		if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
1026 			/* XXX: avoid hash collisions in the first place. */
1027 			MPASS(f2->tid == f->tid);
1028 			remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
1029 			free_filter_resources(f2);
1032 		insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);
1034 		 * Leave the filter pending until it is fully set up, which will
1035 		 * be indicated by the reply to the last TCB update.  No need to
1036 		 * unblock the ioctl thread either.
1038 		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
1043 		/* provide errno instead of tid to ioctl */
1044 		f->tid = act_open_rpl_status_to_errno(status);
1046 		if (act_open_has_tid(status))
1047 			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
1048 		free_filter_resources(f);
1052 		cv_broadcast(&sc->tids.hftid_cv);
1054 	mtx_unlock(&sc->tids.hftid_lock);
1056 	free_atid(sc, atid);
/*
 * Reply to the final TCB update for a new hashfilter.  status 0 marks
 * the filter valid; otherwise the filter's resources and tid are
 * released.  Either way the waiting ioctl thread is woken.
 */
1061 t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
1064 	struct adapter *sc = iq->adapter;
1065 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
1066 	u_int tid = GET_TID(rpl);
1067 	struct filter_entry *f;
1069 	mtx_lock(&sc->tids.hftid_lock);
1070 	f = lookup_hftid(sc, tid);
1071 	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
1072 	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
1074 	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
1077 	if (rpl->status == 0) {
/* TCB update failed: undo everything the active open set up. */
1082 		free_filter_resources(f);
1083 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
1084 		release_tid(sc, tid, &sc->sge.ctrlq[0]);
1088 	cv_broadcast(&sc->tids.hftid_cv);
1089 	mtx_unlock(&sc->tids.hftid_lock);
/*
 * Reply to a hashfilter deletion (abort).  On success the filter's
 * resources, table slot, and tid are all released; the waiting ioctl
 * thread is woken in either case.
 */
1095 t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
1098 	struct adapter *sc = iq->adapter;
1099 	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
1100 	unsigned int tid = GET_TID(cpl);
1101 	struct filter_entry *f;
1103 	mtx_lock(&sc->tids.hftid_lock);
1104 	f = lookup_hftid(sc, tid);
1105 	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
1106 	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
1108 	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
1111 	if (cpl->status == 0) {
1113 		free_filter_resources(f);
1114 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
1115 		release_tid(sc, tid, &sc->sge.ctrlq[0]);
1119 	cv_broadcast(&sc->tids.hftid_cv);
1120 	mtx_unlock(&sc->tids.hftid_lock);
/*
 * Return the first valid TCAM filter at index >= t->idx, filling in the
 * filter's L2T/SMT indices and (if hit counting is enabled) its hit
 * count from the TCB.  t->idx is set to 0xffffffff when no filter is
 * found.
 */
1126 get_tcamfilter(struct adapter *sc, struct t4_filter *t)
1129 	struct filter_entry *f;
1137 	if (separate_hpfilter_region(sc) && t->fs.prio) {
1138 		nfilters = sc->tids.nhpftids;
1139 		f = sc->tids.hpftid_tab;
1140 		in_use = sc->tids.hpftids_in_use;
1142 		tid_base = sc->tids.hpftid_base;
1145 		nfilters = sc->tids.nftids;
1146 		f = sc->tids.ftid_tab;
1147 		in_use = sc->tids.ftids_in_use;
1149 		tid_base = sc->tids.ftid_base;
1153 	if (in_use == 0 || f == NULL || t->idx >= nfilters) {
1154 		t->idx = 0xffffffff;
1159 	mtx_lock(&sc->tids.ftid_lock);
1160 	for (i = t->idx; i < nfilters; i++, f++) {
1162 			MPASS(f->tid == tid_base + i);
1164 			t->l2tidx = f->l2te ? f->l2te->idx : 0;
1165 			t->smtidx = f->smt ? f->smt->idx : 0;
1167 				t->hits = get_filter_hits(sc, f->tid);
/* Hit counting disabled for this filter. */
1169 				t->hits = UINT64_MAX;
1175 	t->idx = 0xffffffff;
1177 	mtx_unlock(&sc->tids.ftid_lock);
/*
 * Hashfilter counterpart of get_tcamfilter: scan the hftid table from
 * t->idx for the next valid filter and report it.  t->idx is set to
 * 0xffffffff when none is found.
 */
1182 get_hashfilter(struct adapter *sc, struct t4_filter *t)
1184 	int i, nfilters = sc->tids.ntids;
1185 	struct filter_entry *f;
1189 	if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
1190 	    t->idx >= nfilters) {
1191 		t->idx = 0xffffffff;
1195 	mtx_lock(&sc->tids.hftid_lock);
1196 	for (i = t->idx; i < nfilters; i++) {
1197 		f = lookup_hftid(sc, i);
1198 		if (f != NULL && f->valid) {
1200 			t->l2tidx = f->l2te ? f->l2te->idx : 0;
1201 			t->smtidx = f->smt ? f->smt->idx : 0;
1203 				t->hits = get_filter_hits(sc, t->idx);
/* Hit counting disabled for this filter. */
1205 				t->hits = UINT64_MAX;
1211 	t->idx = 0xffffffff;
1213 	mtx_unlock(&sc->tids.hftid_lock);
/*
 * Fill in a CPL_ACT_OPEN_REQ6 for an IPv6 hashfilter.  The chip-specific
 * request layouts (T5/T6) overlay the same buffer; the common leading
 * fields are written through 'cpl'.  Several opt0/opt2 fields are
 * repurposed to carry filter parameters (e.g. NAGLE carries the VLAN
 * action, DELACK carries hitcnts) — see the matching decode in firmware.
 */
1218 mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
1219     uint64_t ftuple, struct cpl_act_open_req6 *cpl)
1221 	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
1222 	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
1224 	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1225 	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1228 	if (chip_id(sc) == CHELSIO_T5) {
1229 		INIT_TP_WR(cpl5, 0);
1231 		INIT_TP_WR(cpl6, 0);
/* Reply goes to the fwq, tagged with the hashfilter cookie. */
1236 	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
1237 	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1238 	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1239 	cpl->local_port = htobe16(f->fs.val.dport);
1240 	cpl->peer_port = htobe16(f->fs.val.sport);
1241 	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
1242 	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
1243 	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
1244 	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
1245 	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1246 	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1247 	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1248 	    V_NO_CONG(f->fs.rpttid) |
1249 	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1250 	    F_TCAM_BYPASS | F_NON_OFFLOAD);
1252 	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1253 	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1254 	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1255 	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1256 	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1257 	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1258 	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
/*
 * IPv4 counterpart of mk_act_open_req6: fill in a CPL_ACT_OPEN_REQ for a
 * hashfilter.  IP addresses are assembled byte-wise from the spec (which
 * stores them as byte arrays).  The opt0/opt2 field repurposing matches
 * the IPv6 variant.
 */
1262 mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
1263     uint64_t ftuple, struct cpl_act_open_req *cpl)
1265 	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
1266 	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
1268 	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1269 	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1272 	if (chip_id(sc) == CHELSIO_T5) {
1273 		INIT_TP_WR(cpl5, 0);
1275 		INIT_TP_WR(cpl6, 0);
1280 	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
1281 	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1282 	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1283 	cpl->local_port = htobe16(f->fs.val.dport);
1284 	cpl->peer_port = htobe16(f->fs.val.sport);
1285 	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
1286 	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
1287 	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
1288 	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
1289 	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1290 	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1291 	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1292 	    V_NO_CONG(f->fs.rpttid) |
1293 	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1294 	    F_TCAM_BYPASS | F_NON_OFFLOAD);
1296 	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1297 	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1298 	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1299 	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1300 	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1301 	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1302 	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
/*
 * Length (in 16-byte units) of the active-open CPL for this chip and
 * address family.  Rows are T4/T5/T6 (capped at T6), columns IPv4/IPv6.
 */
1306 act_open_cpl_len16(struct adapter *sc, int isipv6)
1309 	static const int sz_table[3][2] = {
1311 			howmany(sizeof (struct cpl_act_open_req), 16),
1312 			howmany(sizeof (struct cpl_act_open_req6), 16)
1315 			howmany(sizeof (struct cpl_t5_act_open_req), 16),
1316 			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
1319 			howmany(sizeof (struct cpl_t6_act_open_req), 16),
1320 			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
1324 	MPASS(chip_id(sc) >= CHELSIO_T4);
1325 	idx = min(chip_id(sc) - CHELSIO_T4, 2);
1327 	return (sz_table[idx][!!isipv6]);
/*
 * Install a hashfilter: allocate a filter_entry and an atid, send an
 * active-open CPL (built by mk_act_open_req[6]) carrying the compressed
 * tuple, and sleep on hftid_cv until the reply chain (ao_rpl, then
 * tcb_rpl) completes the setup.  Takes ownership of 'l2te'/'smt';
 * releases them itself on every early failure path.
 */
1331 set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
1332     struct l2t_entry *l2te, struct smt_entry *smt)
1335 	struct wrq_cookie cookie;
1336 	struct filter_entry *f;
1340 	/* Already validated against fconf, iconf */
1341 	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
1342 	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
1344 	mtx_lock(&sc->tids.hftid_lock);
1347 	 * XXX: Check for hash collisions and insert in the hash based lookup
1348 	 * table so that in-flight hashfilters are also considered when checking
1352 	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
1353 	if (__predict_false(f == NULL)) {
1355 			t4_l2t_release(l2te);
1357 			t4_smt_release(smt);
/* The atid links the firmware's open reply back to this entry. */
1365 	atid = alloc_atid(sc, f);
1366 	if (__predict_false(atid) == -1) {
1368 			t4_l2t_release(l2te);
1370 			t4_smt_release(smt);
1377 	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
1380 		free_atid(sc, atid);
1382 			t4_l2t_release(l2te);
1384 			t4_smt_release(smt);
1390 		mk_act_open_req6(sc, f, atid, ftuple, wr);
1392 		mk_act_open_req(sc, f, atid, ftuple, wr);
1394 	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
1397 	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1401 		if (f->pending == 0) {
1412 		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1419 	mtx_unlock(&sc->tids.hftid_lock);
1423 /* SET_TCB_FIELD sent as a ULP command looks like this */
1424 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
1425 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/*
 * mk_set_tcb_field_ulp: emit, at ulpmc, a CPL_SET_TCB_FIELD for tid
 * wrapped as an immediate-data ULP_TX_PKT.  The update writes (val & mask)
 * into TCB word 'word'.  No reply is requested (V_NO_REPLY(1)); qid is
 * still recorded in reply_ctrl.  The command is padded to a 16-byte
 * multiple with a ULP_TX_SC_NOOP when needed.
 */
1428 mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
1429 uint64_t val, uint32_t tid, uint32_t qid)
1431 struct ulptx_idata *ulpsc;
1432 struct cpl_set_tcb_field_core *req;
1434 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	/* Length is in 16-byte units, rounded up. */
1435 ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
	/* Immediate-data sub-command carrying the CPL. */
1437 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1438 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1439 ulpsc->len = htobe32(sizeof(*req));
1441 req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
1442 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1443 req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
1444 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
1445 req->mask = htobe64(mask);
1446 req->val = htobe64(val);
	/* Pad to the next 16-byte boundary with a NOOP sub-command. */
1448 ulpsc = (struct ulptx_idata *)(req + 1);
1449 if (LEN__SET_TCB_FIELD_ULP % 16) {
1450 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1451 ulpsc->len = htobe32(0);
1457 /* ABORT_REQ sent as a ULP command looks like this */
1458 #define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
1459 sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

/*
 * mk_abort_req_ulp: emit, at ulpmc, a CPL_ABORT_REQ for tid wrapped as an
 * immediate-data ULP_TX_PKT.  cmd is CPL_ABORT_NO_RST (abort without
 * sending a TCP RST).  Pads to a 16-byte multiple with a NOOP when needed.
 */
1462 mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1464 struct ulptx_idata *ulpsc;
1465 struct cpl_abort_req_core *req;
1467 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	/* Length is in 16-byte units, rounded up. */
1468 ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));
1470 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1471 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1472 ulpsc->len = htobe32(sizeof(*req));
1474 req = (struct cpl_abort_req_core *)(ulpsc + 1);
1475 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
1476 req->rsvd0 = htonl(0);
1478 req->cmd = CPL_ABORT_NO_RST;
	/* Pad to the next 16-byte boundary with a NOOP sub-command. */
1480 ulpsc = (struct ulptx_idata *)(req + 1);
1481 if (LEN__ABORT_REQ_ULP % 16) {
1482 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1483 ulpsc->len = htobe32(0);
1489 /* ABORT_RPL sent as a ULP command looks like this */
1490 #define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
1491 sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

/*
 * mk_abort_rpl_ulp: emit, at ulpmc, a CPL_ABORT_RPL for tid wrapped as an
 * immediate-data ULP_TX_PKT.  Mirrors mk_abort_req_ulp but carries the
 * reply CPL; cmd is CPL_ABORT_NO_RST.  Pads to a 16-byte multiple with a
 * NOOP when needed.
 */
1494 mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1496 struct ulptx_idata *ulpsc;
1497 struct cpl_abort_rpl_core *rpl;
1499 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	/* Length is in 16-byte units, rounded up. */
1500 ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));
1502 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1503 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1504 ulpsc->len = htobe32(sizeof(*rpl));
1506 rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
1507 OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
1508 rpl->rsvd0 = htonl(0);
1510 rpl->cmd = CPL_ABORT_NO_RST;
	/* Pad to the next 16-byte boundary with a NOOP sub-command. */
1512 ulpsc = (struct ulptx_idata *)(rpl + 1);
1513 if (LEN__ABORT_RPL_ULP % 16) {
1514 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1515 ulpsc->len = htobe32(0);
/*
 * del_hashfilter_wrlen: total byte length of the delete-hashfilter work
 * request: a WR header followed by the three ULP commands built by
 * mk_del_hashfilter_wr (SET_TCB_FIELD, ABORT_REQ, ABORT_RPL), each padded
 * to a 16-byte boundary — matching the per-command NOOP padding done by
 * the mk_*_ulp helpers.
 */
1522 del_hashfilter_wrlen(void)
1525 return (sizeof(struct work_request_hdr) +
1526 roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
1527 roundup2(LEN__ABORT_REQ_ULP, 16) +
1528 roundup2(LEN__ABORT_RPL_ULP, 16));
/*
 * mk_del_hashfilter_wr: build the ULP_TX work request that tears down the
 * hash filter with the given tid.  Three commands back to back: point the
 * TCB's RSS_INFO at qid (so the abort reply is steered there), then an
 * ABORT_REQ and an ABORT_RPL for the tid.  wrlen must come from
 * del_hashfilter_wrlen().
 */
1532 mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
1534 struct ulp_txpkt *ulpmc;
1536 INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
1537 ulpmc = (struct ulp_txpkt *)(wrh + 1);
1538 ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
1539 V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
1540 ulpmc = mk_abort_req_ulp(ulpmc, tid);
1541 ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
/*
 * del_hashfilter: delete the hash filter whose tid is t->idx.  Validates
 * the tid, looks up the in-use filter entry under hftid_lock, sends the
 * delete work request on ctrlq[0], and sleeps (interruptibly) until the
 * ithread reports completion via hftid_cv.  t->fs is filled in from the
 * entry so the caller gets the deleted filter's specification back.
 */
1545 del_hashfilter(struct adapter *sc, struct t4_filter *t)
1548 struct filter_entry *f;
1549 struct wrq_cookie cookie;
1551 const int wrlen = del_hashfilter_wrlen();
1553 MPASS(sc->tids.hftid_tab != NULL);
1554 MPASS(sc->tids.ntids > 0);
	/* Reject out-of-range tids before taking the lock. */
1556 if (t->idx >= sc->tids.ntids)
1559 mtx_lock(&sc->tids.hftid_lock);
1560 f = lookup_hftid(sc, t->idx);
1561 if (f == NULL || f->valid == 0) {
1565 MPASS(f->tid == t->idx);
1574 wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
	/* Abort reply is steered to the firmware event queue. */
1580 mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
1583 commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1584 t->fs = f->fs; /* extra info for the caller */
	/* Wait (interruptibly) for the firmware reply to clear f->pending. */
1588 if (f->pending == 0) {
1598 if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1605 mtx_unlock(&sc->tids.hftid_lock);
/* Full 32-bit TCB word mask for the whole-word updates below. */
1609 #define WORD_MASK 0xffffffff
/*
 * set_nat_params: program the NAT rewrite values into the filter's TCB via
 * a series of SET_TCB_FIELD updates.  The dip/sip/dp/sp flags select which
 * of the destination IP, source IP, destination port, and source port are
 * rewritten.  Addresses are stored as byte arrays and packed little-endian
 * into 32-bit TCB words; the 4-word sequences presumably cover the IPv6
 * (16-byte) case and the single-word RX_FRAG* variants the IPv4 case —
 * the selecting condition is not visible here, TODO confirm against the
 * full source.
 *
 * NOTE(review): expressions like f->fs.nat_dip[12] << 24 shift a promoted
 * int; a byte >= 0x80 shifts into the sign bit, which is formally UB —
 * consider (uint32_t) casts.
 */
1611 set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
1612 const bool sip, const bool dp, const bool sp)
	/* Destination IP: four consecutive words starting at SND_UNA_RAW. */
1617 set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
1618 f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
1619 f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);
1621 set_tcb_field(sc, f->tid,
1622 W_TCB_SND_UNA_RAW + 1, WORD_MASK,
1623 f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
1624 f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);
1626 set_tcb_field(sc, f->tid,
1627 W_TCB_SND_UNA_RAW + 2, WORD_MASK,
1628 f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
1629 f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);
1631 set_tcb_field(sc, f->tid,
1632 W_TCB_SND_UNA_RAW + 3, WORD_MASK,
1633 f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1634 f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
	/* Single-word destination IP variant (bytes 3..0 only). */
1636 set_tcb_field(sc, f->tid,
1637 W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
1638 f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1639 f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
	/* Source IP: four consecutive words starting at RX_FRAG2_PTR_RAW. */
1645 set_tcb_field(sc, f->tid,
1646 W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
1647 f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
1648 f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);
1650 set_tcb_field(sc, f->tid,
1651 W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
1652 f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
1653 f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);
1655 set_tcb_field(sc, f->tid,
1656 W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
1657 f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
1658 f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);
1660 set_tcb_field(sc, f->tid,
1661 W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
1662 f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1663 f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
	/* Single-word source IP variant (bytes 3..0 only). */
1666 set_tcb_field(sc, f->tid,
1667 W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
1668 f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1669 f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
	/* Both ports share one word: dport in the low half, sport high. */
1673 set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
1674 (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
1678 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
1679 * last of the series of updates requested a reply. The reply informs the
1680 * driver that the filter is fully setup.
1683 configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
1687 MPASS(f->tid < sc->tids.ntids);
1690 MPASS(f->valid == 0);
1692 if (f->fs.newdmac) {
1693 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
1697 if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
1698 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
1702 if (f->fs.newsmac) {
1703 MPASS(f->smt != NULL);
1704 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
1705 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
1706 V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
1711 switch(f->fs.nat_mode) {
1715 set_nat_params(sc, f, true, false, false, false);
1718 case NAT_MODE_DIP_DP:
1719 set_nat_params(sc, f, true, false, true, false);
1722 case NAT_MODE_DIP_DP_SIP:
1723 set_nat_params(sc, f, true, true, true, false);
1726 case NAT_MODE_DIP_DP_SP:
1727 set_nat_params(sc, f, true, false, true, true);
1730 case NAT_MODE_SIP_SP:
1731 set_nat_params(sc, f, false, true, false, true);
1734 case NAT_MODE_DIP_SIP_SP:
1735 set_nat_params(sc, f, true, true, false, true);
1739 set_nat_params(sc, f, true, true, true, true);
1743 MPASS(0); /* should have been validated earlier */
1748 if (f->fs.nat_seq_chk) {
1749 set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
1750 V_TCB_RCV_NXT(M_TCB_RCV_NXT),
1751 V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
1755 if (is_t5(sc) && f->fs.action == FILTER_DROP) {
1757 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
1759 set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
1760 V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
1765 * Enable switching after all secondary resources (L2T entry, SMT entry,
1766 * etc.) are setup so that any switched packet will use correct
1769 if (f->fs.action == FILTER_SWITCH) {
1770 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
1774 if (f->fs.hitcnts || updated > 0) {
1775 set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
1776 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1777 V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
1778 V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
1779 return (EINPROGRESS);