/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"

struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked or busy */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        int tid;                /* tid of the filter TCB */
        struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */
        struct smt_entry *smt;  /* SMT entry for SMAC rewrite */

        struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

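/*
 * Allocate the tid -> hashfilter lookup table (one slot per hardware tid)
 * along with the lock and condition variable that protect it.  Called
 * lazily, the first time a hashfilter is created.
 */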
static int
alloc_hftid_tab(struct tid_info *t, int flags)
{

        MPASS(t->ntids > 0);
        MPASS(t->hftid_tab == NULL);

        t->hftid_tab = malloc(sizeof(*t->hftid_tab) * t->ntids, M_CXGBE,
            M_ZERO | flags);
        if (t->hftid_tab == NULL)
                return (ENOMEM);
        mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
        cv_init(&t->hftid_cv, "t4hfcv");

        return (0);
}

void
free_hftid_tab(struct tid_info *t)
{
        int i;

        if (t->hftid_tab != NULL) {
                MPASS(t->ntids > 0);
                for (i = 0; t->tids_in_use > 0 && i < t->ntids; i++) {
                        if (t->hftid_tab[i] == NULL)
                                continue;
                        free(t->hftid_tab[i], M_CXGBE);
                        t->tids_in_use--;
                }
                free(t->hftid_tab, M_CXGBE);
                t->hftid_tab = NULL;
        }

        if (mtx_initialized(&t->hftid_lock)) {
                mtx_destroy(&t->hftid_lock);
                cv_destroy(&t->hftid_cv);
        }
}

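/*
 * Bookkeeping for hashfilter tids.  An IPv6 hashfilter occupies two tids so
 * callers pass an ntids of 1 or 2 as appropriate; only the first tid indexes
 * the lookup table.
 */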
static void
insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
{
        struct tid_info *t = &sc->tids;

        t->hftid_tab[tid] = ctx;
        atomic_add_int(&t->tids_in_use, ntids);
}

static void *
lookup_hftid(struct adapter *sc, int tid)
{
        struct tid_info *t = &sc->tids;

        return (t->hftid_tab[tid]);
}

static void
remove_hftid(struct adapter *sc, int tid, int ntids)
{
        struct tid_info *t = &sc->tids;

        t->hftid_tab[tid] = NULL;
        atomic_subtract_int(&t->tids_in_use, ntids);
}

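/*
 * Convert the T4_FILTER_* bits of a filter mode to the corresponding
 * field-enable bits of the TP_VLAN_PRI_MAP register.
 */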
static uint32_t
mode_to_fconf(uint32_t mode)
{
        uint32_t fconf = 0;

        if (mode & T4_FILTER_IP_FRAGMENT)
                fconf |= F_FRAGMENTATION;

        if (mode & T4_FILTER_MPS_HIT_TYPE)
                fconf |= F_MPSHITTYPE;

        if (mode & T4_FILTER_MAC_IDX)
                fconf |= F_MACMATCH;

        if (mode & T4_FILTER_ETH_TYPE)
                fconf |= F_ETHERTYPE;

        if (mode & T4_FILTER_IP_PROTO)
                fconf |= F_PROTOCOL;

        if (mode & T4_FILTER_IP_TOS)
                fconf |= F_TOS;

        if (mode & T4_FILTER_VLAN)
                fconf |= F_VLAN;

        if (mode & T4_FILTER_VNIC)
                fconf |= F_VNIC_ID;

        if (mode & T4_FILTER_PORT)
                fconf |= F_PORT;

        if (mode & T4_FILTER_FCoE)
                fconf |= F_FCOE;

        return (fconf);
}

static uint32_t
mode_to_iconf(uint32_t mode)
{

        if (mode & T4_FILTER_IC_VNIC)
                return (F_VNIC);
        return (0);
}

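/*
 * Reject a filter specification that uses match fields not enabled in the
 * current filter mode (vlan_pri_map), or whose VNIC field type disagrees
 * with the ingress config (F_VNIC selects PF/VF matching instead of outer
 * VLAN matching).
 */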
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
        struct tp_params *tpp = &sc->params.tp;
        uint32_t fconf = 0;

        if (fs->val.frag || fs->mask.frag)
                fconf |= F_FRAGMENTATION;

        if (fs->val.matchtype || fs->mask.matchtype)
                fconf |= F_MPSHITTYPE;

        if (fs->val.macidx || fs->mask.macidx)
                fconf |= F_MACMATCH;

        if (fs->val.ethtype || fs->mask.ethtype)
                fconf |= F_ETHERTYPE;

        if (fs->val.proto || fs->mask.proto)
                fconf |= F_PROTOCOL;

        if (fs->val.tos || fs->mask.tos)
                fconf |= F_TOS;

        if (fs->val.vlan_vld || fs->mask.vlan_vld)
                fconf |= F_VLAN;

        if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
                fconf |= F_VNIC_ID;
                if (tpp->ingress_config & F_VNIC)
                        return (EINVAL);
        }

        if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
                fconf |= F_VNIC_ID;
                if ((tpp->ingress_config & F_VNIC) == 0)
                        return (EINVAL);
        }

        if (fs->val.iport || fs->mask.iport)
                fconf |= F_PORT;

        if (fs->val.fcoe || fs->mask.fcoe)
                fconf |= F_FCOE;

        if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
                return (E2BIG);

        return (0);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        struct tp_params *tp = &sc->params.tp;
        uint64_t mask;

        /* Non-zero incoming value in mode means "hashfilter mode". */
        mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

        /* Always */
        *mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
            T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit)  do { \
        if (tp->vlan_pri_map & (fconf_bit)) { \
                MPASS(tp->field_shift >= 0); \
                if ((mask >> tp->field_shift & field_mask) == field_mask) \
                        *mode |= (mode_bit); \
        } \
} while (0)

        CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
        CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
        CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
        CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
        CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
        CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
        CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
        CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID, T4_FILTER_VNIC);
        if (tp->ingress_config & F_VNIC)
                *mode |= T4_FILTER_IC_VNIC;
        CHECK_FIELD(F_PORT, port_shift, M_FT_PORT, T4_FILTER_PORT);
        CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE, T4_FILTER_FCoE);
#undef CHECK_FIELD

        return (0);
}

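/*
 * Set the global filter mode.  The mode can only be changed while no TCAM
 * filters are in use (and, with TCP_OFFLOAD, while TOE is inactive) because
 * existing filters were installed against the old field layout.
 */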
int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        struct tp_params *tpp = &sc->params.tp;
        uint32_t fconf, iconf;
        int rc;

        iconf = mode_to_iconf(mode);
        if ((iconf ^ tpp->ingress_config) & F_VNIC) {
                /*
                 * For now we just complain if A_TP_INGRESS_CONFIG is not
                 * already set to the correct value for the requested filter
                 * mode.  It's not clear if it's safe to write to this register
                 * on the fly.  (And we trust the cached value of the register).
                 *
                 * check_fspec_against_fconf_iconf and other code that looks at
                 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
                 * thoroughly before allowing dynamic filter mode changes.
                 */
                return (EBUSY);
        }

        fconf = mode_to_fconf(mode);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4setfm");
        if (rc)
                return (rc);

        if (sc->tids.ftids_in_use > 0) {
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        if (uld_active(sc, ULD_TOM)) {
                rc = EBUSY;
                goto done;
        }
#endif

        rc = -t4_set_filter_mode(sc, fconf, true);
done:
        end_synchronized_op(sc, LOCK_HELD);
        return (rc);
}

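/*
 * Read a filter's hit count straight out of its TCB via a PCIe memory
 * window.  T4 keeps a 64-bit count at offset 16 within the TCB; T5 and later
 * keep a 32-bit count at offset 24.
 */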
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
        uint32_t tcb_addr;

        tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

        if (is_t4(sc)) {
                uint64_t hits;

                read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
                return (be64toh(hits));
        } else {
                uint32_t hits;

                read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
                return (be32toh(hits));
        }
}

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        int i, nfilters = sc->tids.nftids;
        struct filter_entry *f;

        if (t->fs.hash)
                return (get_hashfilter(sc, t));

        if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                return (0);
        }

        mtx_lock(&sc->tids.ftid_lock);
        f = &sc->tids.ftid_tab[t->idx];
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        MPASS(f->tid == sc->tids.ftid_base + i);
                        t->idx = i;
                        t->l2tidx = f->l2te ? f->l2te->idx : 0;
                        t->smtidx = f->smt ? f->smt->idx : 0;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, f->tid);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }
        t->idx = 0xffffffff;
done:
        mtx_unlock(&sc->tids.ftid_lock);
        return (0);
}

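/*
 * Program a TCAM filter at the requested index with a FW_FILTER_WR (or a
 * FW_FILTER2_WR when the firmware supports it, which adds swapmac and NAT).
 * The L2T/SMT references are released on failure; otherwise they are owned
 * by the filter until it is deleted.  Sleeps until the firmware replies
 * unless interrupted by a signal, in which case EINPROGRESS is returned and
 * t4_filter_rpl completes the bookkeeping.
 */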
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
        struct filter_entry *f;
        struct fw_filter2_wr *fwr;
        u_int vnic_vld, vnic_vld_mask;
        struct wrq_cookie cookie;
        int i, rc, busy, locked;
        const int ntids = t->fs.type ? 4 : 1;

        MPASS(!t->fs.hash);
        MPASS(t->idx < sc->tids.nftids);
        /* Already validated against fconf, iconf */
        MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
        MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

        f = &sc->tids.ftid_tab[t->idx];
        rc = busy = locked = 0;
        mtx_lock(&sc->tids.ftid_lock);
        for (i = 0; i < ntids; i++) {
                busy += f[i].pending + f[i].valid;
                locked += f[i].locked;
        }
        if (locked > 0)
                rc = EPERM;
        else if (busy > 0)
                rc = EBUSY;
        else {
                int len16;

                if (sc->params.filter2_wr_support)
                        len16 = howmany(sizeof(struct fw_filter2_wr), 16);
                else
                        len16 = howmany(sizeof(struct fw_filter_wr), 16);
                fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie);
                if (__predict_false(fwr == NULL))
                        rc = ENOMEM;
                else {
                        f->pending = 1;
                        sc->tids.ftids_in_use++;
                }
        }
        mtx_unlock(&sc->tids.ftid_lock);
        if (rc != 0) {
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                return (rc);
        }

        /*
         * Can't fail now.  A set-filter WR will definitely be sent.
         */

        f->tid = sc->tids.ftid_base + t->idx;
        f->fs = t->fs;
        f->l2te = l2te;
        f->smt = smt;

        if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
                vnic_vld = 1;
        else
                vnic_vld = 0;
        if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
                vnic_vld_mask = 1;
        else
                vnic_vld_mask = 0;

        bzero(fwr, sizeof(*fwr));
        if (sc->params.filter2_wr_support)
                fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(f->tid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        /* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
        bzero(fwr->sma, sizeof (fwr->sma));
        if (sc->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                        ULP_MODE_TCPDDP : ULP_MODE_NONE) |
                    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
                    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
                fwr->newlport = htobe16(f->fs.nat_dport);
                fwr->newfport = htobe16(f->fs.nat_sport);
                fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
        }
        commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);

        /* Wait for response. */
        mtx_lock(&sc->tids.ftid_lock);
        for (;;) {
                if (f->pending == 0) {
                        rc = f->valid ? 0 : EIO;
                        break;
                }
                if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
                        rc = EINPROGRESS;
                        break;
                }
        }
        mtx_unlock(&sc->tids.ftid_lock);
        return (rc);
}

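/*
 * Build the compressed ("ntuple") form of the filter specification using the
 * field shifts in tp_params.  A hashfilter must use exactly the fields
 * enabled in the hardware's filter mask, so the accumulated mask is checked
 * against tp->hash_filter_mask at the end.
 */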
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
        struct tp_params *tp = &sc->params.tp;
        uint64_t fmask;

        *ftuple = fmask = 0;

        /*
         * Initialize each of the fields which we care about which are present
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && fs->mask.vlan) {
                *ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
                fmask |= M_FT_VLAN << tp->vlan_shift;
        }

        if (tp->port_shift >= 0 && fs->mask.iport) {
                *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
                fmask |= M_FT_PORT << tp->port_shift;
        }

        if (tp->protocol_shift >= 0 && fs->mask.proto) {
                *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
                fmask |= M_FT_PROTOCOL << tp->protocol_shift;
        }

        if (tp->tos_shift >= 0 && fs->mask.tos) {
                *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
                fmask |= M_FT_TOS << tp->tos_shift;
        }

        if (tp->vnic_shift >= 0 && fs->mask.vnic) {
                /* F_VNIC in ingress config was already validated. */
                if (tp->ingress_config & F_VNIC)
                        MPASS(fs->mask.pfvf_vld);
                else
                        MPASS(fs->mask.ovlan_vld);

                *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
                fmask |= M_FT_VNIC_ID << tp->vnic_shift;
        }

        if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
                *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
                fmask |= M_FT_MACMATCH << tp->macmatch_shift;
        }

        if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
                *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
                fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
        }

        if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
                *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
                fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
        }

        if (tp->frag_shift >= 0 && fs->mask.frag) {
                *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
                fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
        }

        if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
                *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
                fmask |= M_FT_FCOE << tp->fcoe_shift;
        }

        /* A hashfilter must conform to the filterMask. */
        if (fmask != tp->hash_filter_mask)
                return (EINVAL);

        return (0);
}

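/*
 * ioctl entry point for creating a filter.  Validates the specification,
 * ensures the adapter is fully initialized and the relevant tid tables
 * exist, allocates any L2T/SMT entries needed for rewrites, and then hands
 * off to set_hashfilter or set_tcamfilter.
 */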
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        struct tid_info *ti = &sc->tids;
        struct l2t_entry *l2te;
        struct smt_entry *smt;
        uint64_t ftuple;
        int rc;

        /*
         * Basic filter checks first.
         */

        if (t->fs.hash) {
                if (!is_hashfilter(sc) || ti->ntids == 0)
                        return (ENOTSUP);
                /* Hardware, not user, selects a tid for hashfilters. */
                if (t->idx != (uint32_t)-1)
                        return (EINVAL);
                /* T5 can't count hashfilter hits. */
                if (is_t5(sc) && t->fs.hitcnts)
                        return (EINVAL);
                rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
                if (rc != 0)
                        return (rc);
        } else {
                if (ti->nftids == 0)
                        return (ENOTSUP);
                if (t->idx >= ti->nftids)
                        return (EINVAL);
                /* IPv6 filter idx must be 4 aligned */
                if (t->fs.type == 1 &&
                    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
                        return (EINVAL);
        }

        /* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
        if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
            (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
            t->fs.swapmac || t->fs.nat_mode))
                return (ENOTSUP);

        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
                return (EINVAL);
        if (t->fs.val.iport >= sc->params.nports)
                return (EINVAL);

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq)
                return (EINVAL);

        /* Validate against the global filter mode and ingress config */
        rc = check_fspec_against_fconf_iconf(sc, &t->fs);
        if (rc != 0)
                return (rc);

        /*
         * Basic checks passed.  Make sure the queues and tid tables are setup.
         */

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0)) {
                end_synchronized_op(sc, 0);
                return (rc);
        }
        if (t->fs.hash) {
                if (__predict_false(ti->hftid_tab == NULL)) {
                        rc = alloc_hftid_tab(&sc->tids, M_NOWAIT);
                        if (rc != 0)
                                goto done;
                }
                if (__predict_false(sc->tids.atid_tab == NULL)) {
                        rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
                        if (rc != 0)
                                goto done;
                }
        } else if (__predict_false(ti->ftid_tab == NULL)) {
                KASSERT(ti->ftids_in_use == 0,
                    ("%s: no memory allocated but ftids_in_use > 0", __func__));
                ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
                    M_CXGBE, M_NOWAIT | M_ZERO);
                if (ti->ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
                cv_init(&ti->ftid_cv, "t4fcv");
        }
done:
        end_synchronized_op(sc, 0);
        if (rc != 0)
                return (rc);

        /*
         * Allocate L2T entry, SMT entry, etc.
         */

        l2te = NULL;
        if (t->fs.newdmac || t->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                l2te = t4_l2t_alloc_switching(sc->l2t);
                if (__predict_false(l2te == NULL))
                        return (EAGAIN);
                rc = t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
                    t->fs.dmac);
                if (rc) {
                        t4_l2t_release(l2te);
                        return (ENOMEM);
                }
        }

        smt = NULL;
        if (t->fs.newsmac) {
                /* This filter needs an SMT entry; allocate one. */
                smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
                if (__predict_false(smt == NULL)) {
                        if (l2te != NULL)
                                t4_l2t_release(l2te);
                        return (EAGAIN);
                }
                rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
                if (rc) {
                        t4_smt_release(smt);
                        if (l2te != NULL)
                                t4_l2t_release(l2te);
                        return (rc);
                }
        }

        if (t->fs.hash)
                return (set_hashfilter(sc, t, ftuple, l2te, smt));
        else
                return (set_tcamfilter(sc, t, l2te, smt));
}

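/*
 * Delete a TCAM filter: send a delete-filter WR and sleep until
 * t4_filter_rpl processes the firmware's reply.  EINPROGRESS is returned if
 * the wait is interrupted; the reply handler still runs and releases the
 * filter.
 */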
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
        struct filter_entry *f;
        struct fw_filter_wr *fwr;
        struct wrq_cookie cookie;
        int rc;

        MPASS(sc->tids.ftid_tab != NULL);
        MPASS(sc->tids.nftids > 0);

        if (t->idx >= sc->tids.nftids)
                return (EINVAL);

        mtx_lock(&sc->tids.ftid_lock);
        f = &sc->tids.ftid_tab[t->idx];
        if (f->locked) {
                rc = EPERM;
                goto done;
        }
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        if (f->valid == 0) {
                rc = EINVAL;
                goto done;
        }
        MPASS(f->tid == sc->tids.ftid_base + t->idx);
        fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
        if (fwr == NULL) {
                rc = ENOMEM;
                goto done;
        }

        bzero(fwr, sizeof (*fwr));
        t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
        f->pending = 1;
        commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
        t->fs = f->fs;  /* extra info for the caller */

        for (;;) {
                if (f->pending == 0) {
                        rc = f->valid ? EIO : 0;
                        break;
                }
                if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&sc->tids.ftid_lock);
        return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

        /* No filters possible if not initialized yet. */
        if (!(sc->flags & FULL_INIT_DONE))
                return (EINVAL);

        /*
         * The checks for tid tables ensure that the locks that del_* will reach
         * for are initialized.
         */
        if (t->fs.hash) {
                if (sc->tids.hftid_tab != NULL)
                        return (del_hashfilter(sc, t));
        } else {
                if (sc->tids.ftid_tab != NULL)
                        return (del_tcamfilter(sc, t));
        }

        return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

        if (f->l2te) {
                t4_l2t_release(f->l2te);
                f->l2te = NULL;
        }
        if (f->smt) {
                t4_smt_release(f->smt);
                f->smt = NULL;
        }
}

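/*
 * Update a field in a hashfilter's TCB with a CPL_SET_TCB_FIELD sent through
 * the management queue.  The hashfilter cookie routes any reply to
 * t4_hashfilter_tcb_rpl.
 */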
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
        struct wrq_cookie cookie;
        struct cpl_set_tcb_field *req;

        req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
        if (req == NULL)
                return (ENOMEM);
        bzero(req, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
        if (no_reply == 0) {
                req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
                    V_NO_REPLY(0));
        } else
                req->reply_ctrl = htobe16(V_NO_REPLY(1));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
        req->mask = htobe64(mask);
        req->val = htobe64(val);
        commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);

        return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

        return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
            (uint64_t)val << bit_pos, no_reply));
}

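/*
 * Reply handler for TCAM filter work requests.  The cookie in the reply
 * encodes the outcome of the set- or del-filter request; the waiting ioctl
 * thread is woken up once the filter's state has been updated.
 */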
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        u_int tid = GET_TID(rpl);
        u_int rc, cleanup, idx;
        struct filter_entry *f;

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));
        MPASS(is_ftid(sc, tid));

        cleanup = 0;
        idx = tid - sc->tids.ftid_base;
        f = &sc->tids.ftid_tab[idx];
        rc = G_COOKIE(rpl->cookie);

        mtx_lock(&sc->tids.ftid_lock);
        KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
            __func__, rc, idx));
        switch (rc) {
        case FW_FILTER_WR_FLT_ADDED:
                /* set-filter succeeded */
                f->valid = 1;
                if (f->fs.newsmac) {
                        MPASS(f->smt != NULL);
                        set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
                        set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
                            V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
                            V_TCB_SMAC_SEL(f->smt->idx), 1);
                        /* XXX: wait for reply to TCB update before !pending */
                }
                break;
        case FW_FILTER_WR_FLT_DELETED:
                /* del-filter succeeded */
                MPASS(f->valid == 1);
                f->valid = 0;
                /* Fall through */
        case FW_FILTER_WR_SMT_TBL_FULL:
                /* set-filter failed due to lack of SMT space. */
                MPASS(f->valid == 0);
                free_filter_resources(f);
                sc->tids.ftids_in_use--;
                break;
        case FW_FILTER_WR_SUCCESS:
        case FW_FILTER_WR_EINVAL:
        default:
                panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
                    idx);
        }
        f->pending = 0;
        cv_broadcast(&sc->tids.ftid_cv);
        mtx_unlock(&sc->tids.ftid_lock);

        return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
        u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
        u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
        struct filter_entry *f = lookup_atid(sc, atid);

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

        mtx_lock(&sc->tids.hftid_lock);
        KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
        KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
            __func__, f, f->tid));
        if (status == CPL_ERR_NONE) {
                struct filter_entry *f2;

                f->tid = GET_TID(cpl);
                MPASS(f->tid < sc->tids.ntids);
                if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
                        /* XXX: avoid hash collisions in the first place. */
                        MPASS(f2->tid == f->tid);
                        remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
                        free_filter_resources(f2);
                        free(f2, M_CXGBE);
                }
                insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);
                /*
                 * Leave the filter pending until it is fully set up, which will
                 * be indicated by the reply to the last TCB update.  No need to
                 * unblock the ioctl thread either.
                 */
                if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
                        goto done;
                f->valid = 1;
                f->pending = 0;
        } else {
                /* provide errno instead of tid to ioctl */
                f->tid = act_open_rpl_status_to_errno(status);
                f->valid = 0;
                if (act_open_has_tid(status))
                        release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
                free_filter_resources(f);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
done:
        mtx_unlock(&sc->tids.hftid_lock);

        free_atid(sc, atid);
        return (0);
}

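/*
 * Reply to the final TCB update for a hashfilter.  A successful reply makes
 * the filter valid; a failure tears it down and reports EIO to the ioctl
 * thread via f->tid.
 */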
int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        u_int tid = GET_TID(rpl);
        struct filter_entry *f;

        mtx_lock(&sc->tids.hftid_lock);
        f = lookup_hftid(sc, tid);
        KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
        KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
            f, tid));
        KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
            __func__, f, tid));
        f->pending = 0;
        if (rpl->status == 0) {
                f->valid = 1;
        } else {
                f->tid = EIO;
                f->valid = 0;
                free_filter_resources(f);
                remove_hftid(sc, tid, f->fs.type ? 2 : 1);
                release_tid(sc, tid, &sc->sge.mgmtq);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
        mtx_unlock(&sc->tids.hftid_lock);

        return (0);
}

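/*
 * Reply to the abort that deletes a hashfilter.  On success the tid is
 * released and the filter entry freed, unless the ioctl thread still holds a
 * reference via f->locked.
 */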
int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
        unsigned int tid = GET_TID(cpl);
        struct filter_entry *f;

        mtx_lock(&sc->tids.hftid_lock);
        f = lookup_hftid(sc, tid);
        KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
        KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
            f, tid));
        KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
            tid));
        f->pending = 0;
        if (cpl->status == 0) {
                f->valid = 0;
                free_filter_resources(f);
                remove_hftid(sc, tid, f->fs.type ? 2 : 1);
                release_tid(sc, tid, &sc->sge.mgmtq);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
        mtx_unlock(&sc->tids.hftid_lock);

        return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
        int i, nfilters = sc->tids.ntids;
        struct filter_entry *f;

        if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                return (0);
        }

        mtx_lock(&sc->tids.hftid_lock);
        for (i = t->idx; i < nfilters; i++) {
                f = lookup_hftid(sc, i);
                if (f != NULL && f->valid) {
                        t->idx = i;
                        t->l2tidx = f->l2te ? f->l2te->idx : 0;
                        t->smtidx = f->smt ? f->smt->idx : 0;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, t->idx);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }
        t->idx = 0xffffffff;
done:
        mtx_unlock(&sc->tids.hftid_lock);
        return (0);
}

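/*
 * A hashfilter is installed with a CPL_ACT_OPEN_REQ(6) carrying
 * F_TCAM_BYPASS and F_NON_OFFLOAD.  The compressed ntuple goes into the
 * params field, and the filter's actions are encoded in fields of opt0 and
 * opt2 that are normally used for TCP connection parameters.
 */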
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
        struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

        /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
        MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
        MPASS(atid >= 0);

        if (chip_id(sc) == CHELSIO_T5) {
                INIT_TP_WR(cpl5, 0);
        } else {
                INIT_TP_WR(cpl6, 0);
                cpl6->rsvd2 = 0;
                cpl6->opt3 = 0;
        }

        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
            V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
            V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
        cpl->local_port = htobe16(f->fs.val.dport);
        cpl->peer_port = htobe16(f->fs.val.sport);
        cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
        cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
        cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
        cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
        cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
            f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
            V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
            V_NO_CONG(f->fs.rpttid) |
            V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
            F_TCAM_BYPASS | F_NON_OFFLOAD);

        cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
        cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
            V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
            V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
            F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
            V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
            V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
        struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

        /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
        MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
        MPASS(atid >= 0);

        if (chip_id(sc) == CHELSIO_T5) {
                INIT_TP_WR(cpl5, 0);
        } else {
                INIT_TP_WR(cpl6, 0);
                cpl6->rsvd2 = 0;
                cpl6->opt3 = 0;
        }

        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
            V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
            V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
        cpl->local_port = htobe16(f->fs.val.dport);
        cpl->peer_port = htobe16(f->fs.val.sport);
        cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
            f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
        cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
            f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
        cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
            f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
            V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
            V_NO_CONG(f->fs.rpttid) |
            V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
            F_TCAM_BYPASS | F_NON_OFFLOAD);

        cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
        cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
            V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
            V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
            F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
            V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
            V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

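/*
 * Length, in 16-byte units, of the act_open request for this chip and
 * address family.
 */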
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
        int idx;
        static const int sz_table[3][2] = {
                {
                        howmany(sizeof (struct cpl_act_open_req), 16),
                        howmany(sizeof (struct cpl_act_open_req6), 16)
                },
                {
                        howmany(sizeof (struct cpl_t5_act_open_req), 16),
                        howmany(sizeof (struct cpl_t5_act_open_req6), 16)
                },
                {
                        howmany(sizeof (struct cpl_t6_act_open_req), 16),
                        howmany(sizeof (struct cpl_t6_act_open_req6), 16)
                },
        };

        MPASS(chip_id(sc) >= CHELSIO_T4);
        idx = min(chip_id(sc) - CHELSIO_T4, 2);

        return (sz_table[idx][!!isipv6]);
}

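/*
 * Create a hashfilter: allocate a filter entry and an atid, send the
 * act_open request, and sleep until the CPL replies (handled above) either
 * validate the filter or report an errno through f->tid.
 */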
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
        void *wr;
        struct wrq_cookie cookie;
        struct filter_entry *f;
        int rc, atid = -1;

        MPASS(t->fs.hash);
        /* Already validated against fconf, iconf */
        MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
        MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

        mtx_lock(&sc->tids.hftid_lock);

        /*
         * XXX: Check for hash collisions and insert in the hash based lookup
         * table so that in-flight hashfilters are also considered when checking
         * for collisions.
         */

        f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
        if (__predict_false(f == NULL)) {
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                rc = ENOMEM;
                goto done;
        }
        f->fs = t->fs;
        f->l2te = l2te;
        f->smt = smt;

        atid = alloc_atid(sc, f);
        if (__predict_false(atid == -1)) {
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                free(f, M_CXGBE);
                rc = EAGAIN;
                goto done;
        }
        MPASS(atid >= 0);

        wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
            &cookie);
        if (wr == NULL) {
                free_atid(sc, atid);
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                free(f, M_CXGBE);
                rc = ENOMEM;
                goto done;
        }
        if (f->fs.type)
                mk_act_open_req6(sc, f, atid, ftuple, wr);
        else
                mk_act_open_req(sc, f, atid, ftuple, wr);

        f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
        f->pending = 1;
        f->tid = -1;
        commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);

        for (;;) {
                MPASS(f->locked);
                if (f->pending == 0) {
                        if (f->valid) {
                                rc = 0;
                                f->locked = 0;
                                t->idx = f->tid;
                        } else {
                                rc = f->tid;
                                free(f, M_CXGBE);
                        }
                        break;
                }
                if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
                        f->locked = 0;
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&sc->tids.hftid_lock);
        return (rc);
}

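/*
 * Hashfilter deletion is a single work request carrying three back-to-back
 * ULP_TX sub-commands: a SET_TCB_FIELD that points the TCB's RSS_INFO at the
 * firmware queue, an ABORT_REQ, and an ABORT_RPL.  The helpers below build
 * each 16-byte-aligned sub-command.
 */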
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
                uint64_t val, uint32_t tid, uint32_t qid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_set_tcb_field_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
        req->mask = htobe64(mask);
        req->val = htobe64(val);

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__SET_TCB_FIELD_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
        sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_abort_req_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_abort_req_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        req->rsvd0 = htonl(0);
        req->rsvd1 = 0;
        req->cmd = CPL_ABORT_NO_RST;

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__ABORT_REQ_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
        sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_abort_rpl_core *rpl;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*rpl));

        rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
        OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        rpl->rsvd0 = htonl(0);
        rpl->rsvd1 = 0;
        rpl->cmd = CPL_ABORT_NO_RST;

        ulpsc = (struct ulptx_idata *)(rpl + 1);
        if (LEN__ABORT_RPL_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

static inline int
del_hashfilter_wrlen(void)
{

        return (sizeof(struct work_request_hdr) +
            roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
            roundup2(LEN__ABORT_REQ_ULP, 16) +
            roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
        struct ulp_txpkt *ulpmc;

        INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
        ulpmc = (struct ulp_txpkt *)(wrh + 1);
        ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
            V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
        ulpmc = mk_abort_req_ulp(ulpmc, tid);
        ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

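/*
 * Delete a hashfilter.  The filter must be valid, unlocked, and idle.  The
 * composite abort WR is sent and the thread sleeps until
 * t4_del_hashfilter_rpl reports the outcome.
 */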
1440 static int
1441 del_hashfilter(struct adapter *sc, struct t4_filter *t)
1442 {
1443         void *wr;
1444         struct filter_entry *f;
1445         struct wrq_cookie cookie;
1446         int rc;
1447         const int wrlen = del_hashfilter_wrlen();
1448
1449         MPASS(sc->tids.hftid_tab != NULL);
1450         MPASS(sc->tids.ntids > 0);
1451
1452         if (t->idx >= sc->tids.ntids)
1453                 return (EINVAL);
1454
1455         mtx_lock(&sc->tids.hftid_lock);
1456         f = lookup_hftid(sc, t->idx);
1457         if (f == NULL || f->valid == 0) {
1458                 rc = EINVAL;
1459                 goto done;
1460         }
1461         MPASS(f->tid == t->idx);
1462         if (f->locked) {
1463                 rc = EPERM;
1464                 goto done;
1465         }
1466         if (f->pending) {
1467                 rc = EBUSY;
1468                 goto done;
1469         }
1470         wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
1471         if (wr == NULL) {
1472                 rc = ENOMEM;
1473                 goto done;
1474         }
1475
1476         mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
1477         f->locked = 1;
1478         f->pending = 1;
1479         commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
1480         t->fs = f->fs;  /* extra info for the caller */
1481
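        /*
         * Wait for the reply to clear f->pending.  If the filter is still
         * valid at that point the hardware failed to delete it (EIO);
         * otherwise the entry can be freed.  A signal interrupts the wait
         * and leaves the delete to complete asynchronously.
         */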
1482         for (;;) {
1483                 MPASS(f->locked);
1484                 if (f->pending == 0) {
1485                         if (f->valid) {
1486                                 f->locked = 0;
1487                                 rc = EIO;
1488                         } else {
1489                                 rc = 0;
1490                                 free(f, M_CXGBE);
1491                         }
1492                         break;
1493                 }
1494                 if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1495                         f->locked = 0;
1496                         rc = EINPROGRESS;
1497                         break;
1498                 }
1499         }
1500 done:
1501         mtx_unlock(&sc->tids.hftid_lock);
1502         return (rc);
1503 }
1504
1505 #define WORD_MASK       0xffffffff
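/*
 * Write the NAT rewrite parameters into the filter's TCB.  The values live
 * in repurposed TCB words (SND_UNA_RAW and the raw RX_FRAG fields); each
 * 32-bit word is assembled from four bytes of the nat_dip/nat_sip arrays.
 * An IPv4 address (fs.type == 0) occupies one word, an IPv6 address four.
 */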
1506 static void
1507 set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
1508     const bool sip, const bool dp, const bool sp)
1509 {
1510
1511         if (dip) {
1512                 if (f->fs.type) {
1513                         set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
1514                             f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
1515                             f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);
1516
1517                         set_tcb_field(sc, f->tid,
1518                             W_TCB_SND_UNA_RAW + 1, WORD_MASK,
1519                             f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
1520                             f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);
1521
1522                         set_tcb_field(sc, f->tid,
1523                             W_TCB_SND_UNA_RAW + 2, WORD_MASK,
1524                             f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
1525                             f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);
1526
1527                         set_tcb_field(sc, f->tid,
1528                             W_TCB_SND_UNA_RAW + 3, WORD_MASK,
1529                             f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1530                             f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1531                 } else {
1532                         set_tcb_field(sc, f->tid,
1533                             W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
1534                             f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1535                             f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1536                 }
1537         }
1538
1539         if (sip) {
1540                 if (f->fs.type) {
1541                         set_tcb_field(sc, f->tid,
1542                             W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
1543                             f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
1544                             f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);
1545
1546                         set_tcb_field(sc, f->tid,
1547                             W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
1548                             f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
1549                             f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);
1550
1551                         set_tcb_field(sc, f->tid,
1552                             W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
1553                             f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
1554                             f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);
1555
1556                         set_tcb_field(sc, f->tid,
1557                             W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
1558                             f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1559                             f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1560
1561                 } else {
1562                         set_tcb_field(sc, f->tid,
1563                             W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
1564                             f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1565                             f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1566                 }
1567         }
1568
1569         set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
1570             (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
1571 }
1572
1573 /*
1574  * Returns EINPROGRESS when at least one TCB update was sent; the last
1575  * update in the series requests a reply, which informs the driver that
1576  * the filter is fully set up.
1577  */
1578 static int
1579 configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
1580 {
1581         int updated = 0;
1582
1583         MPASS(f->tid < sc->tids.ntids);
1584         MPASS(f->fs.hash);
1585         MPASS(f->pending);
1586         MPASS(f->valid == 0);
1587
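        /*
         * The CCTRL tflags set below carry TCP congestion-control names but
         * serve as per-filter action enables in a hash-filter TCB: ECE turns
         * on DMAC rewrite, RFR VLAN insertion/rewrite, CWR SMAC rewrite, and
         * ECN (further down) packet switching.
         */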
1588         if (f->fs.newdmac) {
1589                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
1590                 updated++;
1591         }
1592
1593         if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
1594                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
1595                 updated++;
1596         }
1597
1598         if (f->fs.newsmac) {
1599                 MPASS(f->smt != NULL);
1600                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
1601                 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
1602                     V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
1603                     1);
1604                 updated++;
1605         }
1606
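        /* The boolean arguments to set_nat_params() are (dip, sip, dp, sp). */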
1607         switch (f->fs.nat_mode) {
1608         case NAT_MODE_NONE:
1609                 break;
1610         case NAT_MODE_DIP:
1611                 set_nat_params(sc, f, true, false, false, false);
1612                 updated++;
1613                 break;
1614         case NAT_MODE_DIP_DP:
1615                 set_nat_params(sc, f, true, false, true, false);
1616                 updated++;
1617                 break;
1618         case NAT_MODE_DIP_DP_SIP:
1619                 set_nat_params(sc, f, true, true, true, false);
1620                 updated++;
1621                 break;
1622         case NAT_MODE_DIP_DP_SP:
1623                 set_nat_params(sc, f, true, false, true, true);
1624                 updated++;
1625                 break;
1626         case NAT_MODE_SIP_SP:
1627                 set_nat_params(sc, f, false, true, false, true);
1628                 updated++;
1629                 break;
1630         case NAT_MODE_DIP_SIP_SP:
1631                 set_nat_params(sc, f, true, true, false, true);
1632                 updated++;
1633                 break;
1634         case NAT_MODE_ALL:
1635                 set_nat_params(sc, f, true, true, true, true);
1636                 updated++;
1637                 break;
1638         default:
1639                 MPASS(0);       /* should have been validated earlier */
1640                 break;
1642         }
1643
1644         if (f->fs.nat_seq_chk) {
1645                 set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
1646                     V_TCB_RCV_NXT(M_TCB_RCV_NXT),
1647                     V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
1648                 updated++;
1649         }
1650
1651         if (is_t5(sc) && f->fs.action == FILTER_DROP) {
1652                 /*
1653                  * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
1654                  */
1655                 set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
1656                     V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
1657                 updated++;
1658         }
1659
1660         /*
1661  * Enable switching only after all secondary resources (L2T entry, SMT
1662  * entry, etc.) are set up so that any switched packet uses the correct
1663  * values.
1664          */
1665         if (f->fs.action == FILTER_SWITCH) {
1666                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
1667                 updated++;
1668         }
1669
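        /*
         * If hit counts were requested, or any update was sent above, zero
         * the TIMESTAMP and T_RTT_TS_RECENT_AGE words (which accumulate the
         * filter's hit counts) and, unlike the earlier updates, request a
         * reply (final argument 0) so the driver learns when the TCB is
         * fully programmed.
         */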
1670         if (f->fs.hitcnts || updated > 0) {
1671                 set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
1672                     V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1673                     V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
1674                     V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
1675                 return (EINPROGRESS);
1676         }
1677
1678         return (0);
1679 }