/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"

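/*
 * Internal representation of a single filter, TCAM-based or hash-based.  An
 * entry is "pending" from the moment its work request is sent until the
 * firmware's reply has been processed, and "valid" while the filter is
 * installed in the hardware.
 */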
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked or busy */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        int tid;                /* tid of the filter TCB */
        struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */
        struct smt_entry *smt;  /* SMT entry for SMAC rewrite */

        struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

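/*
 * T6 and later chips implement the high-priority filters as a region separate
 * from the normal TCAM filter region.
 */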
static inline bool
separate_hpfilter_region(struct adapter *sc)
{

        return (chip_id(sc) >= CHELSIO_T6);
}

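/*
 * Allocate the table used to look up a hashfilter by its tid, along with the
 * lock and condition variable that protect it.
 */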
static int
alloc_hftid_tab(struct tid_info *t, int flags)
{

        MPASS(t->ntids > 0);
        MPASS(t->hftid_tab == NULL);

        t->hftid_tab = malloc(sizeof(*t->hftid_tab) * t->ntids, M_CXGBE,
            M_ZERO | flags);
        if (t->hftid_tab == NULL)
                return (ENOMEM);
        mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
        cv_init(&t->hftid_cv, "t4hfcv");

        return (0);
}

void
free_hftid_tab(struct tid_info *t)
{
        int i;

        if (t->hftid_tab != NULL) {
                MPASS(t->ntids > 0);
                for (i = 0; t->tids_in_use > 0 && i < t->ntids; i++) {
                        if (t->hftid_tab[i] == NULL)
                                continue;
                        free(t->hftid_tab[i], M_CXGBE);
                        t->tids_in_use--;
                }
                free(t->hftid_tab, M_CXGBE);
                t->hftid_tab = NULL;
        }

        if (mtx_initialized(&t->hftid_lock)) {
                mtx_destroy(&t->hftid_lock);
                cv_destroy(&t->hftid_cv);
        }
}

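/*
 * tid -> filter_entry bookkeeping for hashfilters.  An IPv6 hashfilter
 * (fs.type != 0) accounts for two tids.  Callers are expected to hold
 * hftid_lock.
 */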
static void
insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
{
        struct tid_info *t = &sc->tids;

        t->hftid_tab[tid] = ctx;
        atomic_add_int(&t->tids_in_use, ntids);
}

static void *
lookup_hftid(struct adapter *sc, int tid)
{
        struct tid_info *t = &sc->tids;

        return (t->hftid_tab[tid]);
}

static void
remove_hftid(struct adapter *sc, int tid, int ntids)
{
        struct tid_info *t = &sc->tids;

        t->hftid_tab[tid] = NULL;
        atomic_subtract_int(&t->tids_in_use, ntids);
}

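/*
 * Translate the T4_FILTER_* bits of a filter mode (the ioctl encoding) into
 * the field selectors of TP_VLAN_PRI_MAP, and the VNIC bit into its selector
 * in TP_INGRESS_CONFIG.
 */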
static uint32_t
mode_to_fconf(uint32_t mode)
{
        uint32_t fconf = 0;

        if (mode & T4_FILTER_IP_FRAGMENT)
                fconf |= F_FRAGMENTATION;

        if (mode & T4_FILTER_MPS_HIT_TYPE)
                fconf |= F_MPSHITTYPE;

        if (mode & T4_FILTER_MAC_IDX)
                fconf |= F_MACMATCH;

        if (mode & T4_FILTER_ETH_TYPE)
                fconf |= F_ETHERTYPE;

        if (mode & T4_FILTER_IP_PROTO)
                fconf |= F_PROTOCOL;

        if (mode & T4_FILTER_IP_TOS)
                fconf |= F_TOS;

        if (mode & T4_FILTER_VLAN)
                fconf |= F_VLAN;

        if (mode & T4_FILTER_VNIC)
                fconf |= F_VNIC_ID;

        if (mode & T4_FILTER_PORT)
                fconf |= F_PORT;

        if (mode & T4_FILTER_FCoE)
                fconf |= F_FCOE;

        return (fconf);
}

static uint32_t
mode_to_iconf(uint32_t mode)
{

        if (mode & T4_FILTER_IC_VNIC)
                return (F_VNIC);
        return (0);
}

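/*
 * Verify that the requested filter specification only uses fields that are
 * part of the current filter mode (vlan_pri_map) and that its use of the
 * VNIC/outer-VLAN field agrees with the ingress config.  Returns EINVAL for a
 * VNIC mismatch and E2BIG if the specification needs fields that the filter
 * mode does not provide.
 */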
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
        struct tp_params *tpp = &sc->params.tp;
        uint32_t fconf = 0;

        if (fs->val.frag || fs->mask.frag)
                fconf |= F_FRAGMENTATION;

        if (fs->val.matchtype || fs->mask.matchtype)
                fconf |= F_MPSHITTYPE;

        if (fs->val.macidx || fs->mask.macidx)
                fconf |= F_MACMATCH;

        if (fs->val.ethtype || fs->mask.ethtype)
                fconf |= F_ETHERTYPE;

        if (fs->val.proto || fs->mask.proto)
                fconf |= F_PROTOCOL;

        if (fs->val.tos || fs->mask.tos)
                fconf |= F_TOS;

        if (fs->val.vlan_vld || fs->mask.vlan_vld)
                fconf |= F_VLAN;

        if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
                fconf |= F_VNIC_ID;
                if (tpp->ingress_config & F_VNIC)
                        return (EINVAL);
        }

        if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
                fconf |= F_VNIC_ID;
                if ((tpp->ingress_config & F_VNIC) == 0)
                        return (EINVAL);
        }

        if (fs->val.iport || fs->mask.iport)
                fconf |= F_PORT;

        if (fs->val.fcoe || fs->mask.fcoe)
                fconf |= F_FCOE;

        if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
                return (E2BIG);

        return (0);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        struct tp_params *tp = &sc->params.tp;
        uint64_t mask;

        /* Non-zero incoming value in mode means "hashfilter mode". */
        mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

        /* Always */
        *mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
            T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit)  do { \
        if (tp->vlan_pri_map & (fconf_bit)) { \
                MPASS(tp->field_shift >= 0); \
                if ((mask >> tp->field_shift & field_mask) == field_mask) \
                        *mode |= (mode_bit); \
        } \
} while (0)

        CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
        CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
        CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
        CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
        CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
        CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
        CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
        CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID, T4_FILTER_VNIC);
        if (tp->ingress_config & F_VNIC)
                *mode |= T4_FILTER_IC_VNIC;
        CHECK_FIELD(F_PORT, port_shift, M_FT_PORT, T4_FILTER_PORT);
        CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE, T4_FILTER_FCoE);
#undef CHECK_FIELD

        return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        struct tp_params *tpp = &sc->params.tp;
        uint32_t fconf, iconf;
        int rc;

        iconf = mode_to_iconf(mode);
        if ((iconf ^ tpp->ingress_config) & F_VNIC) {
                /*
                 * For now we just complain if A_TP_INGRESS_CONFIG is not
                 * already set to the correct value for the requested filter
                 * mode.  It's not clear if it's safe to write to this register
                 * on the fly.  (And we trust the cached value of the register).
                 *
                 * check_fspec_against_fconf_iconf and other code that looks at
                 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
                 * thoroughly before allowing dynamic filter mode changes.
                 */
                return (EBUSY);
        }

        fconf = mode_to_fconf(mode);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4setfm");
        if (rc)
                return (rc);

        if (sc->tids.ftids_in_use > 0 || sc->tids.hpftids_in_use > 0) {
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        if (uld_active(sc, ULD_TOM)) {
                rc = EBUSY;
                goto done;
        }
#endif

        rc = -t4_set_filter_mode(sc, fconf, true);
done:
        end_synchronized_op(sc, LOCK_HELD);
        return (rc);
}

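/*
 * Read a filter's hit count straight out of its TCB via a memory window.  T4
 * keeps a 64-bit count in the TCB; T5 and later keep a 32-bit count at a
 * different offset.
 */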
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
        uint32_t tcb_addr;

        tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

        if (is_t4(sc)) {
                uint64_t hits;

                read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
                return (be64toh(hits));
        } else {
                uint32_t hits;

                read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
                return (be32toh(hits));
        }
}

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        if (t->fs.hash)
                return (get_hashfilter(sc, t));
        else
                return (get_tcamfilter(sc, t));
}

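/*
 * Program a TCAM filter: reserve the filter entry (an IPv6 filter occupies
 * four consecutive slots), send a FW_FILTER_WR or FW_FILTER2_WR on the
 * control queue, and sleep until the firmware's reply is processed by
 * t4_filter_rpl.
 */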
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
        struct filter_entry *f;
        struct fw_filter2_wr *fwr;
        u_int vnic_vld, vnic_vld_mask;
        struct wrq_cookie cookie;
        int i, rc, busy, locked;
        u_int tid;
        const int ntids = t->fs.type ? 4 : 1;

        MPASS(!t->fs.hash);
        /* Already validated against fconf, iconf */
        MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
        MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

        if (separate_hpfilter_region(sc) && t->fs.prio) {
                MPASS(t->idx < sc->tids.nhpftids);
                f = &sc->tids.hpftid_tab[t->idx];
                tid = sc->tids.hpftid_base + t->idx;
        } else {
                MPASS(t->idx < sc->tids.nftids);
                f = &sc->tids.ftid_tab[t->idx];
                tid = sc->tids.ftid_base + t->idx;
        }
        rc = busy = locked = 0;
        mtx_lock(&sc->tids.ftid_lock);
        for (i = 0; i < ntids; i++) {
                busy += f[i].pending + f[i].valid;
                locked += f[i].locked;
        }
        if (locked > 0)
                rc = EPERM;
        else if (busy > 0)
                rc = EBUSY;
        else {
                int len16;

                if (sc->params.filter2_wr_support)
                        len16 = howmany(sizeof(struct fw_filter2_wr), 16);
                else
                        len16 = howmany(sizeof(struct fw_filter_wr), 16);
                fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
                if (__predict_false(fwr == NULL))
                        rc = ENOMEM;
                else {
                        f->pending = 1;
                        if (separate_hpfilter_region(sc) && t->fs.prio)
                                sc->tids.hpftids_in_use++;
                        else
                                sc->tids.ftids_in_use++;
                }
        }
        mtx_unlock(&sc->tids.ftid_lock);
        if (rc != 0) {
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                return (rc);
        }

        /*
         * Can't fail now.  A set-filter WR will definitely be sent.
         */

        f->tid = tid;
        f->fs = t->fs;
        f->l2te = l2te;
        f->smt = smt;

        if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
                vnic_vld = 1;
        else
                vnic_vld = 0;
        if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
                vnic_vld_mask = 1;
        else
                vnic_vld_mask = 0;

        bzero(fwr, sizeof(*fwr));
        if (sc->params.filter2_wr_support)
                fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(f->tid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        /* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
        bzero(fwr->sma, sizeof (fwr->sma));
        if (sc->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                        ULP_MODE_TCPDDP : ULP_MODE_NONE) |
                    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
                    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
                fwr->newlport = htobe16(f->fs.nat_dport);
                fwr->newfport = htobe16(f->fs.nat_sport);
                fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
        }
        commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

        /* Wait for response. */
        mtx_lock(&sc->tids.ftid_lock);
        for (;;) {
                if (f->pending == 0) {
                        rc = f->valid ? 0 : EIO;
                        break;
                }
                if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
                        rc = EINPROGRESS;
                        break;
                }
        }
        mtx_unlock(&sc->tids.ftid_lock);
        return (rc);
}

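/*
 * Build the Compressed Filter Tuple for a hashfilter.  The set of fields
 * provided must match the hardware's hash-filter mask exactly or the request
 * cannot be expressed as a hashfilter.
 */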
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
        struct tp_params *tp = &sc->params.tp;
        uint64_t fmask;

        *ftuple = fmask = 0;

        /*
         * Initialize each of the fields which we care about which are present
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && fs->mask.vlan) {
                *ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
                fmask |= M_FT_VLAN << tp->vlan_shift;
        }

        if (tp->port_shift >= 0 && fs->mask.iport) {
                *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
                fmask |= M_FT_PORT << tp->port_shift;
        }

        if (tp->protocol_shift >= 0 && fs->mask.proto) {
                *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
                fmask |= M_FT_PROTOCOL << tp->protocol_shift;
        }

        if (tp->tos_shift >= 0 && fs->mask.tos) {
                *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
                fmask |= M_FT_TOS << tp->tos_shift;
        }

        if (tp->vnic_shift >= 0 && fs->mask.vnic) {
                /* F_VNIC in ingress config was already validated. */
                if (tp->ingress_config & F_VNIC)
                        MPASS(fs->mask.pfvf_vld);
                else
                        MPASS(fs->mask.ovlan_vld);

                *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
                fmask |= M_FT_VNIC_ID << tp->vnic_shift;
        }

        if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
                *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
                fmask |= M_FT_MACMATCH << tp->macmatch_shift;
        }

        if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
                *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
                fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
        }

        if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
                *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
                fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
        }

        if (tp->frag_shift >= 0 && fs->mask.frag) {
                *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
                fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
        }

        if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
                *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
                fmask |= M_FT_FCOE << tp->fcoe_shift;
        }

        /* A hashfilter must conform to the filterMask. */
        if (fmask != tp->hash_filter_mask)
                return (EINVAL);

        return (0);
}

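/*
 * ioctl entry point for creating a filter.  Validates the request, sets up
 * the tid tables and queues on first use, allocates the L2T/SMT entries
 * needed by switching filters, and hands off to set_hashfilter or
 * set_tcamfilter.
 */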
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        struct tid_info *ti = &sc->tids;
        struct l2t_entry *l2te;
        struct smt_entry *smt;
        uint64_t ftuple;
        int rc;

        /*
         * Basic filter checks first.
         */

        if (t->fs.hash) {
                if (!is_hashfilter(sc) || ti->ntids == 0)
                        return (ENOTSUP);
                /* Hardware, not user, selects a tid for hashfilters. */
                if (t->idx != (uint32_t)-1)
                        return (EINVAL);
                /* T5 can't count hashfilter hits. */
                if (is_t5(sc) && t->fs.hitcnts)
                        return (EINVAL);
                rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
                if (rc != 0)
                        return (rc);
        } else {
                if (separate_hpfilter_region(sc) && t->fs.prio) {
                        if (ti->nhpftids == 0)
                                return (ENOTSUP);
                        if (t->idx >= ti->nhpftids)
                                return (EINVAL);
                } else {
                        if (ti->nftids == 0)
                                return (ENOTSUP);
                        if (t->idx >= ti->nftids)
                                return (EINVAL);
                }
                /* IPv6 filter idx must be 4-aligned. */
                if (t->fs.type == 1 &&
                    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
                        return (EINVAL);
        }

        /* T4 doesn't support VLAN tag removal or rewrite, swapmac, or NAT. */
        if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
            (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
            t->fs.swapmac || t->fs.nat_mode))
                return (ENOTSUP);

        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
                return (EINVAL);
        if (t->fs.val.iport >= sc->params.nports)
                return (EINVAL);

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq)
                return (EINVAL);

        /* Validate against the global filter mode and ingress config */
        rc = check_fspec_against_fconf_iconf(sc, &t->fs);
        if (rc != 0)
                return (rc);

        /*
         * Basic checks passed.  Make sure the queues and tid tables are setup.
         */

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0)) {
                end_synchronized_op(sc, 0);
                return (rc);
        }
        if (t->fs.hash) {
                if (__predict_false(ti->hftid_tab == NULL)) {
                        rc = alloc_hftid_tab(&sc->tids, M_NOWAIT);
                        if (rc != 0)
                                goto done;
                }
                if (__predict_false(sc->tids.atid_tab == NULL)) {
                        rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
                        if (rc != 0)
                                goto done;
                }
        } else if (separate_hpfilter_region(sc) && t->fs.prio &&
            __predict_false(ti->hpftid_tab == NULL)) {
                MPASS(ti->nhpftids != 0);
                KASSERT(ti->hpftids_in_use == 0,
                    ("%s: no memory allocated but hpftids_in_use is %u",
                    __func__, ti->hpftids_in_use));
                ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
                    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
                if (ti->hpftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                if (!mtx_initialized(&sc->tids.ftid_lock)) {
                        mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
                        cv_init(&ti->ftid_cv, "t4fcv");
                }
        } else if (__predict_false(ti->ftid_tab == NULL)) {
                MPASS(ti->nftids != 0);
                KASSERT(ti->ftids_in_use == 0,
                    ("%s: no memory allocated but ftids_in_use is %u",
                    __func__, ti->ftids_in_use));
                ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
                    M_CXGBE, M_NOWAIT | M_ZERO);
                if (ti->ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                if (!mtx_initialized(&sc->tids.ftid_lock)) {
                        mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
                        cv_init(&ti->ftid_cv, "t4fcv");
                }
        }
done:
        end_synchronized_op(sc, 0);
        if (rc != 0)
                return (rc);

        /*
         * Allocate L2T entry, SMT entry, etc.
         */

        l2te = NULL;
        if (t->fs.newdmac || t->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                l2te = t4_l2t_alloc_switching(sc->l2t);
                if (__predict_false(l2te == NULL))
                        return (EAGAIN);
                rc = t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
                    t->fs.dmac);
                if (rc) {
                        t4_l2t_release(l2te);
                        return (ENOMEM);
                }
        }

        smt = NULL;
        if (t->fs.newsmac) {
                /* This filter needs an SMT entry; allocate one. */
                smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
                if (__predict_false(smt == NULL)) {
                        if (l2te != NULL)
                                t4_l2t_release(l2te);
                        return (EAGAIN);
                }
                rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
                if (rc) {
                        t4_smt_release(smt);
                        if (l2te != NULL)
                                t4_l2t_release(l2te);
                        return (rc);
                }
        }

        if (t->fs.hash)
                return (set_hashfilter(sc, t, ftuple, l2te, smt));
        else
                return (set_tcamfilter(sc, t, l2te, smt));
}

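/*
 * Delete a TCAM filter: send a delete work request for the tid and sleep
 * until t4_filter_rpl processes the firmware's reply.
 */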
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
        struct filter_entry *f;
        struct fw_filter_wr *fwr;
        struct wrq_cookie cookie;
        int rc, nfilters;
#ifdef INVARIANTS
        u_int tid_base;
#endif

        mtx_lock(&sc->tids.ftid_lock);
        if (separate_hpfilter_region(sc) && t->fs.prio) {
                nfilters = sc->tids.nhpftids;
                f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
                tid_base = sc->tids.hpftid_base;
#endif
        } else {
                nfilters = sc->tids.nftids;
                f = sc->tids.ftid_tab;
#ifdef INVARIANTS
                tid_base = sc->tids.ftid_base;
#endif
        }
        MPASS(f != NULL);       /* Caller checked this. */
        if (t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }
        f += t->idx;

        if (f->locked) {
                rc = EPERM;
                goto done;
        }
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        if (f->valid == 0) {
                rc = EINVAL;
                goto done;
        }
        MPASS(f->tid == tid_base + t->idx);
        fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16),
            &cookie);
        if (fwr == NULL) {
                rc = ENOMEM;
                goto done;
        }

        bzero(fwr, sizeof (*fwr));
        t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
        f->pending = 1;
        commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
        t->fs = f->fs;  /* extra info for the caller */

        for (;;) {
                if (f->pending == 0) {
                        rc = f->valid ? EIO : 0;
                        break;
                }
                if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&sc->tids.ftid_lock);
        return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

        /* No filters possible if not initialized yet. */
        if (!(sc->flags & FULL_INIT_DONE))
                return (EINVAL);

        /*
         * The checks on the tid tables ensure that the locks the del_*
         * functions will acquire have already been initialized.
         */
        if (t->fs.hash) {
                if (sc->tids.hftid_tab != NULL)
                        return (del_hashfilter(sc, t));
        } else if (separate_hpfilter_region(sc) && t->fs.prio) {
                if (sc->tids.hpftid_tab != NULL)
                        return (del_tcamfilter(sc, t));
        } else {
                if (sc->tids.ftid_tab != NULL)
                        return (del_tcamfilter(sc, t));
        }

        return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

        if (f->l2te) {
                t4_l2t_release(f->l2te);
                f->l2te = NULL;
        }
        if (f->smt) {
                t4_smt_release(f->smt);
                f->smt = NULL;
        }
}

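/*
 * Update one field of a hashfilter's TCB with a CPL_SET_TCB_FIELD sent on the
 * control queue.  Replies, when requested, are tagged with
 * CPL_COOKIE_HASHFILTER and directed to the firmware event queue.
 */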
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
        struct wrq_cookie cookie;
        struct cpl_set_tcb_field *req;

        req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16),
            &cookie);
        if (req == NULL)
                return (ENOMEM);
        bzero(req, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
        if (no_reply == 0) {
                req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
                    V_NO_REPLY(0));
        } else
                req->reply_ctrl = htobe16(V_NO_REPLY(1));
        req->word_cookie = htobe16(V_WORD(word) |
            V_COOKIE(CPL_COOKIE_HASHFILTER));
        req->mask = htobe64(mask);
        req->val = htobe64(val);
        commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);

        return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

        return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
            (uint64_t)val << bit_pos, no_reply));
}

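/*
 * Handle the firmware's reply to a TCAM filter work request (delivered as a
 * set_tcb_rpl whose cookie carries the FW_FILTER_WR_* status).  Marks the
 * filter valid, or tears it down, and wakes up the thread sleeping in
 * set_tcamfilter or del_tcamfilter.
 */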
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        u_int tid = GET_TID(rpl);
        u_int rc, idx;
        struct filter_entry *f;

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));

        if (is_hpftid(sc, tid)) {
                idx = tid - sc->tids.hpftid_base;
                f = &sc->tids.hpftid_tab[idx];
        } else if (is_ftid(sc, tid)) {
                idx = tid - sc->tids.ftid_base;
                f = &sc->tids.ftid_tab[idx];
        } else
                panic("%s: FW reply for invalid TID %d.", __func__, tid);

        MPASS(f->tid == tid);
        rc = G_COOKIE(rpl->cookie);

        mtx_lock(&sc->tids.ftid_lock);
        KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
            __func__, rc, tid));
        switch (rc) {
        case FW_FILTER_WR_FLT_ADDED:
                /* set-filter succeeded */
                f->valid = 1;
                if (f->fs.newsmac) {
                        MPASS(f->smt != NULL);
                        set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
                        set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
                            V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
                            V_TCB_SMAC_SEL(f->smt->idx), 1);
                        /* XXX: wait for reply to TCB update before !pending */
                }
                break;
        case FW_FILTER_WR_FLT_DELETED:
                /* del-filter succeeded */
                MPASS(f->valid == 1);
                f->valid = 0;
                /* Fall through */
        case FW_FILTER_WR_SMT_TBL_FULL:
                /* set-filter failed due to lack of SMT space. */
                MPASS(f->valid == 0);
                free_filter_resources(f);
                if (separate_hpfilter_region(sc) && f->fs.prio)
                        sc->tids.hpftids_in_use--;
                else
                        sc->tids.ftids_in_use--;
                break;
        case FW_FILTER_WR_SUCCESS:
        case FW_FILTER_WR_EINVAL:
        default:
                panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
                    idx);
        }
        f->pending = 0;
        cv_broadcast(&sc->tids.ftid_cv);
        mtx_unlock(&sc->tids.ftid_lock);

        return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional
 * TCB updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
        u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
        u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
        struct filter_entry *f = lookup_atid(sc, atid);

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

        mtx_lock(&sc->tids.hftid_lock);
        KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
        KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
            __func__, f, f->tid));
        if (status == CPL_ERR_NONE) {
                struct filter_entry *f2;

                f->tid = GET_TID(cpl);
                MPASS(f->tid < sc->tids.ntids);
                if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
                        /* XXX: avoid hash collisions in the first place. */
                        MPASS(f2->tid == f->tid);
                        remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
                        free_filter_resources(f2);
                        free(f2, M_CXGBE);
                }
                insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);
                /*
                 * Leave the filter pending until it is fully set up, which
                 * will be indicated by the reply to the last TCB update.  No
                 * need to unblock the ioctl thread either.
                 */
                if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
                        goto done;
                f->valid = 1;
                f->pending = 0;
        } else {
                /* provide errno instead of tid to ioctl */
                f->tid = act_open_rpl_status_to_errno(status);
                f->valid = 0;
                if (act_open_has_tid(status))
                        release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
                free_filter_resources(f);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
done:
        mtx_unlock(&sc->tids.hftid_lock);

        free_atid(sc, atid);
        return (0);
}

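/*
 * Reply to the last TCB update for a new hashfilter.  This is the point at
 * which the filter becomes valid, or is torn down if the update failed (an
 * errno is left in f->tid for the ioctl thread in that case).
 */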
int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        u_int tid = GET_TID(rpl);
        struct filter_entry *f;

        mtx_lock(&sc->tids.hftid_lock);
        f = lookup_hftid(sc, tid);
        KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
        KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
            f, tid));
        KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
            __func__, f, tid));
        f->pending = 0;
        if (rpl->status == 0) {
                f->valid = 1;
        } else {
                f->tid = EIO;
                f->valid = 0;
                free_filter_resources(f);
                remove_hftid(sc, tid, f->fs.type ? 2 : 1);
                release_tid(sc, tid, &sc->sge.ctrlq[0]);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
        mtx_unlock(&sc->tids.hftid_lock);

        return (0);
}

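/*
 * Reply to the ABORT that deletes a hashfilter.  On success the tid is
 * released and the filter's resources are freed.
 */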
int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
        unsigned int tid = GET_TID(cpl);
        struct filter_entry *f;

        mtx_lock(&sc->tids.hftid_lock);
        f = lookup_hftid(sc, tid);
        KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
        KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
            f, tid));
        KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
            tid));
        f->pending = 0;
        if (cpl->status == 0) {
                f->valid = 0;
                free_filter_resources(f);
                remove_hftid(sc, tid, f->fs.type ? 2 : 1);
                release_tid(sc, tid, &sc->sge.ctrlq[0]);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
        mtx_unlock(&sc->tids.hftid_lock);

        return (0);
}

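/*
 * Return the first valid TCAM filter at or after t->idx, along with its hit
 * count if requested.  t->idx is set to 0xffffffff if no such filter exists.
 */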
static int
get_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
        int i, nfilters;
        struct filter_entry *f;
        u_int in_use;
#ifdef INVARIANTS
        u_int tid_base;
#endif

        MPASS(!t->fs.hash);

        if (separate_hpfilter_region(sc) && t->fs.prio) {
                nfilters = sc->tids.nhpftids;
                f = sc->tids.hpftid_tab;
                in_use = sc->tids.hpftids_in_use;
#ifdef INVARIANTS
                tid_base = sc->tids.hpftid_base;
#endif
        } else {
                nfilters = sc->tids.nftids;
                f = sc->tids.ftid_tab;
                in_use = sc->tids.ftids_in_use;
#ifdef INVARIANTS
                tid_base = sc->tids.ftid_base;
#endif
        }

        if (in_use == 0 || f == NULL || t->idx >= nfilters) {
                t->idx = 0xffffffff;
                return (0);
        }

        f += t->idx;
        mtx_lock(&sc->tids.ftid_lock);
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        MPASS(f->tid == tid_base + i);
                        t->idx = i;
                        t->l2tidx = f->l2te ? f->l2te->idx : 0;
                        t->smtidx = f->smt ? f->smt->idx : 0;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, f->tid);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }
        t->idx = 0xffffffff;
done:
        mtx_unlock(&sc->tids.ftid_lock);
        return (0);
}

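/*
 * Same as get_tcamfilter but walks the hashfilter tid space.
 */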
static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
        int i, nfilters = sc->tids.ntids;
        struct filter_entry *f;

        MPASS(t->fs.hash);

        if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                return (0);
        }

        mtx_lock(&sc->tids.hftid_lock);
        for (i = t->idx; i < nfilters; i++) {
                f = lookup_hftid(sc, i);
                if (f != NULL && f->valid) {
                        t->idx = i;
                        t->l2tidx = f->l2te ? f->l2te->idx : 0;
                        t->smtidx = f->smt ? f->smt->idx : 0;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, t->idx);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }
        t->idx = 0xffffffff;
done:
        mtx_unlock(&sc->tids.hftid_lock);
        return (0);
}

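/*
 * A hashfilter is created with a TCAM-bypass Active Open.  These helpers
 * build the IPv6 and IPv4 variants of the request; several opt0/opt2 fields
 * are repurposed to carry the filter's actions (NAT mode, swapmac, steering,
 * etc.) instead of their usual TCP meanings.
 */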
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
        struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

        /* Review changes to CPL after cpl_t6_act_open_req6 if this goes off. */
        MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
        MPASS(atid >= 0);

        if (chip_id(sc) == CHELSIO_T5) {
                INIT_TP_WR(cpl5, 0);
        } else {
                INIT_TP_WR(cpl6, 0);
                cpl6->rsvd2 = 0;
                cpl6->opt3 = 0;
        }

        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
            V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
            V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
        cpl->local_port = htobe16(f->fs.val.dport);
        cpl->peer_port = htobe16(f->fs.val.sport);
        cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
        cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
        cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
        cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
        cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
            f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
            V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
            V_NO_CONG(f->fs.rpttid) |
            V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
            F_TCAM_BYPASS | F_NON_OFFLOAD);

        cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
        cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
            V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
            V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
            F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
            V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
            V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
        struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

        /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
        MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
        MPASS(atid >= 0);

        if (chip_id(sc) == CHELSIO_T5) {
                INIT_TP_WR(cpl5, 0);
        } else {
                INIT_TP_WR(cpl6, 0);
                cpl6->rsvd2 = 0;
                cpl6->opt3 = 0;
        }

        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
            V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
            V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
        cpl->local_port = htobe16(f->fs.val.dport);
        cpl->peer_port = htobe16(f->fs.val.sport);
        cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
            f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
        cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
            f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
        cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
            f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
            V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
            V_NO_CONG(f->fs.rpttid) |
            V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
            F_TCAM_BYPASS | F_NON_OFFLOAD);

        cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
        cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
            V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
            V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
            F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
            V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
            V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

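/*
 * Length (in units of 16 bytes) of the act_open request for the given chip
 * and address family.
 */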
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
        int idx;
        static const int sz_table[3][2] = {
                {
                        howmany(sizeof (struct cpl_act_open_req), 16),
                        howmany(sizeof (struct cpl_act_open_req6), 16)
                },
                {
                        howmany(sizeof (struct cpl_t5_act_open_req), 16),
                        howmany(sizeof (struct cpl_t5_act_open_req6), 16)
                },
                {
                        howmany(sizeof (struct cpl_t6_act_open_req), 16),
                        howmany(sizeof (struct cpl_t6_act_open_req6), 16)
                },
        };

        MPASS(chip_id(sc) >= CHELSIO_T4);
        idx = min(chip_id(sc) - CHELSIO_T4, 2);

        return (sz_table[idx][!!isipv6]);
}

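/*
 * Program a hashfilter: allocate a filter entry and an atid, send the
 * chip-specific act_open request, and sleep until the reply handlers above
 * finish setting the filter up (or report why they could not).
 */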
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
        void *wr;
        struct wrq_cookie cookie;
        struct filter_entry *f;
        int rc, atid = -1;

        MPASS(t->fs.hash);
        /* Already validated against fconf, iconf */
        MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
        MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

        mtx_lock(&sc->tids.hftid_lock);

        /*
         * XXX: Check for hash collisions and insert in the hash based lookup
         * table so that in-flight hashfilters are also considered when
         * checking for collisions.
         */

        f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
        if (__predict_false(f == NULL)) {
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                rc = ENOMEM;
                goto done;
        }
        f->fs = t->fs;
        f->l2te = l2te;
        f->smt = smt;

        atid = alloc_atid(sc, f);
        if (__predict_false(atid == -1)) {
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                free(f, M_CXGBE);
                rc = EAGAIN;
                goto done;
        }
        MPASS(atid >= 0);

        wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
            &cookie);
        if (wr == NULL) {
                free_atid(sc, atid);
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
                free(f, M_CXGBE);
                rc = ENOMEM;
                goto done;
        }
        if (f->fs.type)
                mk_act_open_req6(sc, f, atid, ftuple, wr);
        else
                mk_act_open_req(sc, f, atid, ftuple, wr);

        f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
        f->pending = 1;
        f->tid = -1;
        commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);

        for (;;) {
                MPASS(f->locked);
                if (f->pending == 0) {
                        if (f->valid) {
                                rc = 0;
                                f->locked = 0;
                                t->idx = f->tid;
                        } else {
                                rc = f->tid;
                                free(f, M_CXGBE);
                        }
                        break;
                }
                if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
                        f->locked = 0;
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&sc->tids.hftid_lock);
        return (rc);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_set_tcb_field_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
        req->mask = htobe64(mask);
        req->val = htobe64(val);

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__SET_TCB_FIELD_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

1457 /* ABORT_REQ sent as a ULP command looks like this */
1458 #define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
1459     sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))
1460
1461 static void *
1462 mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1463 {
1464         struct ulptx_idata *ulpsc;
1465         struct cpl_abort_req_core *req;
1466
1467         ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1468         ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));
1469
1470         ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1471         ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1472         ulpsc->len = htobe32(sizeof(*req));
1473
1474         req = (struct cpl_abort_req_core *)(ulpsc + 1);
1475         OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
1476         req->rsvd0 = htobe32(0);
1477         req->rsvd1 = 0;
1478         req->cmd = CPL_ABORT_NO_RST;
1479
1480         ulpsc = (struct ulptx_idata *)(req + 1);
1481         if (LEN__ABORT_REQ_ULP % 16) {
1482                 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1483                 ulpsc->len = htobe32(0);
1484                 return (ulpsc + 1);
1485         }
1486         return (ulpsc);
1487 }
1488
1489 /* ABORT_RPL sent as a ULP command looks like this */
1490 #define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
1491     sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))
1492
1493 static void *
1494 mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1495 {
1496         struct ulptx_idata *ulpsc;
1497         struct cpl_abort_rpl_core *rpl;
1498
1499         ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1500         ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));
1501
1502         ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1503         ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1504         ulpsc->len = htobe32(sizeof(*rpl));
1505
1506         rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
1507         OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
1508         rpl->rsvd0 = htobe32(0);
1509         rpl->rsvd1 = 0;
1510         rpl->cmd = CPL_ABORT_NO_RST;
1511
1512         ulpsc = (struct ulptx_idata *)(rpl + 1);
1513         if (LEN__ABORT_RPL_ULP % 16) {
1514                 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1515                 ulpsc->len = htobe32(0);
1516                 return (ulpsc + 1);
1517         }
1518         return (ulpsc);
1519 }
1520
1521 static inline int
1522 del_hashfilter_wrlen(void)
1523 {
1524
1525         return (sizeof(struct work_request_hdr) +
1526             roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
1527             roundup2(LEN__ABORT_REQ_ULP, 16) +
1528             roundup2(LEN__ABORT_RPL_ULP, 16));
1529 }
1530
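     /*
      * A hash filter is deleted with a single work request that chains three
      * ULP sub-commands: point the TCB's RSS_INFO at the queue that should
      * receive the abort notification, then fire an ABORT_REQ and an
      * ABORT_RPL at the tid.  Each sub-command is padded to a 16B multiple,
      * which is what the roundup2 in del_hashfilter_wrlen accounts for.
      */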
1531 static void
1532 mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
1533 {
1534         struct ulp_txpkt *ulpmc;
1535
1536         INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
1537         ulpmc = (struct ulp_txpkt *)(wrh + 1);
1538         ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
1539             V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
1540         ulpmc = mk_abort_req_ulp(ulpmc, tid);
1541         ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
1542 }
1543
1544 static int
1545 del_hashfilter(struct adapter *sc, struct t4_filter *t)
1546 {
1547         void *wr;
1548         struct filter_entry *f;
1549         struct wrq_cookie cookie;
1550         int rc;
1551         const int wrlen = del_hashfilter_wrlen();
1552
1553         MPASS(sc->tids.hftid_tab != NULL);
1554         MPASS(sc->tids.ntids > 0);
1555
1556         if (t->idx >= sc->tids.ntids)
1557                 return (EINVAL);
1558
1559         mtx_lock(&sc->tids.hftid_lock);
1560         f = lookup_hftid(sc, t->idx);
1561         if (f == NULL || f->valid == 0) {
1562                 rc = EINVAL;
1563                 goto done;
1564         }
1565         MPASS(f->tid == t->idx);
1566         if (f->locked) {
1567                 rc = EPERM;
1568                 goto done;
1569         }
1570         if (f->pending) {
1571                 rc = EBUSY;
1572                 goto done;
1573         }
1574         wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
1575         if (wr == NULL) {
1576                 rc = ENOMEM;
1577                 goto done;
1578         }
1579
1580         mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
1581         f->locked = 1;
1582         f->pending = 1;
1583         commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1584         t->fs = f->fs;  /* extra info for the caller */
1585
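             /*
              * Same dance as in the hash-filter setup path: the ithread that
              * handles the reply clears f->pending.  If the filter is still
              * valid at that point the hardware didn't remove it, hence EIO.
              */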
1586         for (;;) {
1587                 MPASS(f->locked);
1588                 if (f->pending == 0) {
1589                         if (f->valid) {
1590                                 f->locked = 0;
1591                                 rc = EIO;
1592                         } else {
1593                                 rc = 0;
1594                                 free(f, M_CXGBE);
1595                         }
1596                         break;
1597                 }
1598                 if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1599                         f->locked = 0;
1600                         rc = EINPROGRESS;
1601                         break;
1602                 }
1603         }
1604 done:
1605         mtx_unlock(&sc->tids.hftid_lock);
1606         return (rc);
1607 }
1608
1609 #define WORD_MASK       0xffffffff
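     /*
      * Writes the NAT rewrite addresses and ports into the filter's TCB.  The
      * addresses are packed a byte at a time into 32-bit TCB words that a
      * filter TCB doesn't otherwise use; e.g. an IPv4 address with octets
      * nat_dip[0..3] = a.b.c.d is written as (d | c << 8 | b << 16 | a << 24).
      */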
1610 static void
1611 set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
1612     const bool sip, const bool dp, const bool sp)
1613 {
1614
1615         if (dip) {
1616                 if (f->fs.type) {
1617                         set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
1618                             f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
1619                             f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);
1620
1621                         set_tcb_field(sc, f->tid,
1622                             W_TCB_SND_UNA_RAW + 1, WORD_MASK,
1623                             f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
1624                             f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);
1625
1626                         set_tcb_field(sc, f->tid,
1627                             W_TCB_SND_UNA_RAW + 2, WORD_MASK,
1628                             f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
1629                             f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);
1630
1631                         set_tcb_field(sc, f->tid,
1632                             W_TCB_SND_UNA_RAW + 3, WORD_MASK,
1633                             f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1634                             f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1635                 } else {
1636                         set_tcb_field(sc, f->tid,
1637                             W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
1638                             f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1639                             f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1640                 }
1641         }
1642
1643         if (sip) {
1644                 if (f->fs.type) {
1645                         set_tcb_field(sc, f->tid,
1646                             W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
1647                             f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
1648                             f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);
1649
1650                         set_tcb_field(sc, f->tid,
1651                             W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
1652                             f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
1653                             f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);
1654
1655                         set_tcb_field(sc, f->tid,
1656                             W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
1657                             f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
1658                             f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);
1659
1660                         set_tcb_field(sc, f->tid,
1661                             W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
1662                             f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1663                             f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1664
1665                 } else {
1666                         set_tcb_field(sc, f->tid,
1667                             W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
1668                             f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1669                             f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1670                 }
1671         }
1672
1673         set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
1674             (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
1675 }
1676
1677 /*
1678  * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
1679  * last of the series of updates requested a reply.  The reply informs the
1680  * driver that the filter is fully setup.
1681  */
1682 static int
1683 configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
1684 {
1685         int updated = 0;
1686
1687         MPASS(f->tid < sc->tids.ntids);
1688         MPASS(f->fs.hash);
1689         MPASS(f->pending);
1690         MPASS(f->valid == 0);
1691
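             /*
              * The TCP congestion-control tflags are repurposed as filter
              * controls in a hashfilter TCB: ECE enables DMAC rewrite, RFR
              * VLAN rewrite/insert, CWR SMAC rewrite, and ECN switching.
              */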
1692         if (f->fs.newdmac) {
1693                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
1694                 updated++;
1695         }
1696
1697         if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
1698                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
1699                 updated++;
1700         }
1701
1702         if (f->fs.newsmac) {
1703                 MPASS(f->smt != NULL);
1704                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
1705                 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
1706                     V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
1707                     1);
1708                 updated++;
1709         }
1710
1711         switch (f->fs.nat_mode) {
1712         case NAT_MODE_NONE:
1713                 break;
1714         case NAT_MODE_DIP:
1715                 set_nat_params(sc, f, true, false, false, false);
1716                 updated++;
1717                 break;
1718         case NAT_MODE_DIP_DP:
1719                 set_nat_params(sc, f, true, false, true, false);
1720                 updated++;
1721                 break;
1722         case NAT_MODE_DIP_DP_SIP:
1723                 set_nat_params(sc, f, true, true, true, false);
1724                 updated++;
1725                 break;
1726         case NAT_MODE_DIP_DP_SP:
1727                 set_nat_params(sc, f, true, false, true, true);
1728                 updated++;
1729                 break;
1730         case NAT_MODE_SIP_SP:
1731                 set_nat_params(sc, f, false, true, false, true);
1732                 updated++;
1733                 break;
1734         case NAT_MODE_DIP_SIP_SP:
1735                 set_nat_params(sc, f, true, true, false, true);
1736                 updated++;
1737                 break;
1738         case NAT_MODE_ALL:
1739                 set_nat_params(sc, f, true, true, true, true);
1740                 updated++;
1741                 break;
1742         default:
1743                 MPASS(0);       /* should have been validated earlier */
1744                 break;
1746         }
1747
1748         if (f->fs.nat_seq_chk) {
1749                 set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
1750                     V_TCB_RCV_NXT(M_TCB_RCV_NXT),
1751                     V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
1752                 updated++;
1753         }
1754
1755         if (is_t5(sc) && f->fs.action == FILTER_DROP) {
1756                 /*
1757                  * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
1758                  */
1759                 set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
1760                     V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
1761                 updated++;
1762         }
1763
1764         /*
1765          * Enable switching after all secondary resources (L2T entry, SMT entry,
1766          * etc.) are set up so that any switched packet will use correct
1767          * values.
1768          */
1769         if (f->fs.action == FILTER_SWITCH) {
1770                 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
1771                 updated++;
1772         }
1773
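             /*
              * The TIMESTAMP/T_RTT_TS_RECENT_AGE words double as the filter's
              * hit counters, so they are cleared last.  This is also the only
              * update that requests a reply (final argument 0 rather than 1),
              * which is why it goes out whenever any earlier update was sent:
              * its completion tells the driver the whole series took effect.
              */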
1774         if (f->fs.hitcnts || updated > 0) {
1775                 set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
1776                     V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1777                     V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
1778                     V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
1779                 return (EINPROGRESS);
1780         }
1781
1782         return (0);
1783 }