/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <netinet/in.h>
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */
	struct smt_entry *smt;	/* SMT entry for SMAC rewrite */

	struct t4_filter_specification fs;
};
static void free_filter_resources(struct filter_entry *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);
static int
alloc_hftid_tab(struct tid_info *t, int flags)
{

	MPASS(t->hftid_tab == NULL);

	t->hftid_tab = malloc(sizeof(*t->hftid_tab) * t->ntids, M_CXGBE,
	    flags | M_ZERO);
	if (t->hftid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
	cv_init(&t->hftid_cv, "t4hfcv");

	return (0);
}
void
free_hftid_tab(struct tid_info *t)
{
	int i;

	if (t->hftid_tab != NULL) {
		for (i = 0; t->tids_in_use > 0 && i < t->ntids; i++) {
			struct filter_entry *f = t->hftid_tab[i];

			if (f == NULL)
				continue;
			t->tids_in_use -= f->fs.type ? 2 : 1;
			free(f, M_CXGBE);
		}
		free(t->hftid_tab, M_CXGBE);
		t->hftid_tab = NULL;
	}

	if (mtx_initialized(&t->hftid_lock)) {
		mtx_destroy(&t->hftid_lock);
		cv_destroy(&t->hftid_cv);
	}
}
static void
insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->hftid_tab[tid] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

static void *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->hftid_tab[tid]);
}

static void
remove_hftid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->hftid_tab[tid] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}
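
/*
 * Note on the ntids accounting above: an IPv4 hashfilter consumes one tid
 * and an IPv6 hashfilter two, so callers pass "f->fs.type ? 2 : 1".  The
 * TCAM side is analogous but wider: an IPv6 TCAM filter occupies four
 * slots (see the ntids calculation in set_tcamfilter below).
 */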
static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}
static uint32_t
mode_to_iconf(uint32_t mode)
{

	if (mode & T4_FILTER_IC_VNIC)
		return (F_VNIC);
	return (0);
}
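
/*
 * Illustrative example (not compiled in): in addition to the always-on
 * 4-tuple, a mode that asks for ingress port and VLAN matching maps to the
 * compressed filter configuration as
 *
 *	mode_to_fconf(T4_FILTER_PORT | T4_FILTER_VLAN) == (F_PORT | F_VLAN)
 *
 * and only the T4_FILTER_IC_VNIC bit influences the ingress config (iconf).
 */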
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t mask;

	/* Non-zero incoming value in mode means "hashfilter mode". */
	mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

	/* Always */
	*mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit) do { \
	if (tp->vlan_pri_map & (fconf_bit)) { \
		MPASS(tp->field_shift >= 0); \
		if ((mask >> tp->field_shift & field_mask) == field_mask) \
			*mode |= (mode_bit); \
	} \
} while (0)

	CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
	CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
	CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
	CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
	CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
	CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
	CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
	CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID, T4_FILTER_VNIC);
	if (tp->ingress_config & F_VNIC)
		*mode |= T4_FILTER_IC_VNIC;
	CHECK_FIELD(F_PORT, port_shift, M_FT_PORT, T4_FILTER_PORT);
	CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE, T4_FILTER_FCoE);
#undef CHECK_FIELD

	return (0);
}
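
/*
 * For reference, a single CHECK_FIELD expansion (illustrative): the F_VLAN
 * invocation above expands to roughly
 *
 *	if (tp->vlan_pri_map & F_VLAN) {
 *		MPASS(tp->vlan_shift >= 0);
 *		if ((mask >> tp->vlan_shift & M_FT_VLAN) == M_FT_VLAN)
 *			*mode |= T4_FILTER_VLAN;
 *	}
 *
 * i.e. a mode bit is reported only when the field is both compiled into the
 * filter configuration and fully covered by the hashfilter mask.
 */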
int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
		 * thoroughly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}
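
/*
 * Worked example (illustrative): assuming the usual 128-byte TCB
 * (TCB_SIZE), the hit count for tid 5 on a T4 lives at
 *
 *	t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 5 * 128 + 16
 *
 * as a 64-bit big-endian value; later chips keep a 32-bit count at byte
 * offset 24 instead, which is why the two branches above differ.
 */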
int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	if (t->fs.hash)
		return (get_hashfilter(sc, t));

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			MPASS(f->tid == sc->tids.ftid_base + i);
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	MPASS(t->idx < sc->tids.nftids);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	f = &sc->tids.ftid_tab[t->idx];
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0) {
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
		return (rc);
	}

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = sc->tids.ftid_base + t->idx;
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
	    V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
	    V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
	    V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
	bzero(fwr->sma, sizeof (fwr->sma));
	if (sc->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
		fmask |= M_FT_VLAN << tp->vlan_shift;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= M_FT_PORT << tp->port_shift;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= M_FT_PROTOCOL << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= M_FT_TOS << tp->tos_shift;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* F_VNIC in ingress config was already validated. */
		if (tp->ingress_config & F_VNIC)
			MPASS(fs->mask.pfvf_vld);
		else
			MPASS(fs->mask.ovlan_vld);

		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= M_FT_VNIC_ID << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= M_FT_MACMATCH << tp->macmatch_shift;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= M_FT_FCOE << tp->fcoe_shift;
	}

	/* A hashfilter must conform to the filterMask. */
	if (fmask != tp->hash_filter_mask)
		return (EINVAL);

	return (0);
}
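
/*
 * Worked example (illustrative shifts, not from real hardware): suppose
 * the configured filter mask covers only protocol and ingress port, with
 * tp->protocol_shift == 0 and tp->port_shift == 8.  A TCP filter bound to
 * ingress port 0 then packs as
 *
 *	*ftuple = (uint64_t)IPPROTO_TCP << 0 | (uint64_t)0 << 8;
 *	fmask   = M_FT_PROTOCOL << 0 | M_FT_PORT << 8;
 *
 * and the request is accepted only if fmask matches tp->hash_filter_mask
 * bit for bit.
 */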
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te;
	struct smt_entry *smt;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (ti->nftids == 0)
			return (ENOTSUP);
		if (t->idx >= ti->nftids)
			return (EINVAL);
		/* IPv6 filter idx must be 4 aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		if (__predict_false(ti->hftid_tab == NULL)) {
			rc = alloc_hftid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
		if (__predict_false(sc->tids.atid_tab == NULL)) {
			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use > 0", __func__));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
		cv_init(&ti->ftid_cv, "t4fcv");
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	l2te = NULL;
	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc->l2t);
		if (__predict_false(l2te == NULL))
			return (EAGAIN);
		rc = t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
		    t->fs.dmac);
		if (rc) {
			t4_l2t_release(l2te);
			return (ENOMEM);
		}
	}

	smt = NULL;
	if (t->fs.newsmac) {
		/* This filter needs an SMT entry; allocate one. */
		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
		if (__predict_false(smt == NULL)) {
			if (l2te != NULL)
				t4_l2t_release(l2te);
			return (EAGAIN);
		}
		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
		if (rc) {
			t4_smt_release(smt);
			if (l2te != NULL)
				t4_l2t_release(l2te);
			return (rc);
		}
	}

	if (t->fs.hash)
		return (set_hashfilter(sc, t, ftuple, l2te, smt));
	else
		return (set_tcamfilter(sc, t, l2te, smt));
}
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc;

	MPASS(sc->tids.ftid_tab != NULL);
	MPASS(sc->tids.nftids > 0);

	if (t->idx >= sc->tids.nftids)
		return (EINVAL);

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == sc->tids.ftid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}
int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks for tid tables ensure that the locks that del_* will reach
	 * for are initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_tab != NULL)
			return (del_hashfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}
/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
	if (f->smt) {
		t4_smt_release(f->smt);
		f->smt = NULL;
	}
}
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);

	return (0);
}
/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, no_reply));
}
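
/*
 * Illustrative expansion (not compiled in): setting a congestion-control
 * flag such as S_TF_CCTRL_ECE on tid 7 without asking for a reply,
 *
 *	set_tcb_tflag(sc, 7, S_TF_CCTRL_ECE, 1, 1);
 *
 * becomes a set_tcb_field() on W_TCB_T_FLAGS with both the mask and the
 * value equal to 1ULL << S_TF_CCTRL_ECE.
 */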
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));
	MPASS(is_ftid(sc, tid));

	idx = tid - sc->tids.ftid_base;
	f = &sc->tids.ftid_tab[idx];
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, idx));
	switch(rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		if (f->fs.newsmac) {
			MPASS(f->smt != NULL);
			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
			    V_TCB_SMAC_SEL(f->smt->idx), 1);
			/* XXX: wait for reply to TCB update before !pending */
		}
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		free_filter_resources(f);
		sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}
/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
void
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		struct filter_entry *f2;

		f->tid = GET_TID(cpl);
		MPASS(f->tid < sc->tids.ntids);
		if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
			/* XXX: avoid hash collisions in the first place. */
			MPASS(f2->tid == f->tid);
			remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
			free_filter_resources(f2);
			free(f2, M_CXGBE);
		}
		insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);

		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
		free_filter_resources(f);
		f->pending = 0;
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);
	free_atid(sc, atid);
}
void
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));

	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = EIO;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);
}
void
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));

	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);
}
static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.ntids;
	struct filter_entry *f;

	if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.hftid_lock);
	for (i = t->idx; i < nfilters; i++) {
		f = lookup_hftid(sc, i);
		if (f != NULL && f->valid) {
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (0);
}
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}
static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}
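
/*
 * Usage sketch (illustrative): on a T6 adapter an IPv6 hashfilter WR is
 * sized as act_open_cpl_len16(sc, 1), i.e.
 * howmany(sizeof(struct cpl_t6_act_open_req6), 16) len16 units; any chip
 * newer than T6 would be clamped to the T6 row by the min() above.
 */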
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	mtx_lock(&sc->tids.hftid_lock);

	/*
	 * XXX: Check for hash collisions and insert in the hash based lookup
	 * table so that in-flight hashfilters are also considered when checking
	 * for collisions.
	 */

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		free(f, M_CXGBE);
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		free(f, M_CXGBE);
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, ftuple, wr);
	else
		mk_act_open_req(sc, f, atid, ftuple, wr);

	f->locked = 1;	/* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
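
/*
 * A sketch of the resulting on-the-wire layout (sizes come from the shared
 * headers; illustrative only):
 *
 *	struct ulp_txpkt		ULP_TX_PKT header
 *	struct ulptx_idata		ULP_TX_SC_IMM, len = sizeof(core CPL)
 *	struct cpl_set_tcb_field_core	the actual SET_TCB_FIELD
 *	[struct ulptx_idata]		ULP_TX_SC_NOOP pad, present only when
 *					the total above is not a multiple of 16
 */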
static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
	}

	return (ulpsc);
}
/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
	}

	return (ulpsc);
}
/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
	}

	return (ulpsc);
}
static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}
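
/*
 * Worked example (illustrative): the delete WR is a single work request
 * that chains three ULP commands, so its length is
 *
 *	sizeof(struct work_request_hdr)
 *	    + roundup2(LEN__SET_TCB_FIELD_ULP, 16)	(rewrite TCB_RSS_INFO)
 *	    + roundup2(LEN__ABORT_REQ_ULP, 16)		(CPL_ABORT_REQ, no RST)
 *	    + roundup2(LEN__ABORT_RPL_ULP, 16)		(CPL_ABORT_RPL, no RST)
 *
 * with each piece padded to the 16-byte granularity the SGE expects.
 */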
static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}
static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();

	MPASS(sc->tids.hftid_tab != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx >= sc->tids.ntids)
		return (EINVAL);

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				f->locked = 0;
				rc = EIO;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}
#define WORD_MASK	0xffffffff
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
			    f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
			    f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 1, WORD_MASK,
			    f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
			    f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 2, WORD_MASK,
			    f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
			    f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 3, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
			    f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
			    f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
			    f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
			    f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
			    f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
			    f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		}
	}

	set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
	    (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}
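
/*
 * Byte-packing example (illustrative): for an IPv4 filter (f->fs.type == 0)
 * rewriting the destination address to 192.0.2.1, nat_dip[] holds
 * {192, 0, 2, 1} and the value written to W_TCB_RX_FRAG3_LEN_RAW above is
 *
 *	1 | 2 << 8 | 0 << 16 | 192 << 24
 *
 * i.e. the last address byte lands in the low byte of the 32-bit TCB word;
 * set_tcb_field() then converts the whole mask/value pair to big-endian on
 * its way to the chip.
 */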
/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		MPASS(f->smt != NULL);
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
		    1);
		updated++;
	}

	switch(f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT entry,
	 * etc.) are setup so that any switched packet will use correct
	 * values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}