/* FreeBSD: sys/dev/cxgbe/t4_filter.c */
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Chelsio Communications, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 #include <sys/fnv_hash.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/bus.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/rwlock.h>
44 #include <sys/socket.h>
45 #include <sys/sbuf.h>
46 #include <netinet/in.h>
47
48 #include "common/common.h"
49 #include "common/t4_msg.h"
50 #include "common/t4_regs.h"
51 #include "common/t4_regs_values.h"
52 #include "common/t4_tcb.h"
53 #include "t4_l2t.h"
54 #include "t4_smt.h"
55
/*
 * Driver state for one filter.  Hashfilters are linked into both the 4-tuple
 * hash (link_4t) and, once a tid is assigned, the tid hash (link_tid).  TCAM
 * filters live in the ftid_tab/hpftid_tab arrays instead (see set_tcamfilter).
 */
struct filter_entry {
	LIST_ENTRY(filter_entry) link_4t;	/* membership in hftid_hash_4t */
	LIST_ENTRY(filter_entry) link_tid;	/* membership in hftid_hash_tid */

	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */
	struct smt_entry *smt;	/* SMT entry for SMAC rewrite */

	struct t4_filter_specification fs;	/* user-supplied specification */
};
69
/* Forward declarations; implementations are further down in this file. */
static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);
77
78 static inline bool
79 separate_hpfilter_region(struct adapter *sc)
80 {
81
82         return (chip_id(sc) >= CHELSIO_T6);
83 }
84
85 static inline uint32_t
86 hf_hashfn_4t(struct t4_filter_specification *fs)
87 {
88         struct t4_filter_tuple *ft = &fs->val;
89         uint32_t hash;
90
91         if (fs->type) {
92                 /* IPv6 */
93                 hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
94                 hash = fnv_32_buf(&ft->dip[0], 16, hash);
95         } else {
96                 hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
97                 hash = fnv_32_buf(&ft->dip[0], 4, hash);
98         }
99         hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
100         hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);
101
102         return (hash);
103 }
104
105 static inline uint32_t
106 hf_hashfn_tid(int tid)
107 {
108
109         return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
110 }
111
/*
 * Allocate the two hashfilter lookup tables (keyed by 4-tuple and by tid)
 * and initialize the lock and condvar that protect them.  'flags' is passed
 * through to hashinit_flags (e.g. HASH_NOWAIT).  Returns 0 or ENOMEM; on
 * failure 't' is left exactly as it was found.
 */
static int
alloc_hftid_hash(struct tid_info *t, int flags)
{
	int n;

	MPASS(t->ntids > 0);
	MPASS(t->hftid_hash_4t == NULL);
	MPASS(t->hftid_hash_tid == NULL);

	/* Roughly one bucket per 1024 tids, with a floor of 16 buckets. */
	n = max(t->ntids / 1024, 16);
	t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
	if (t->hftid_hash_4t == NULL)
		return (ENOMEM);
	t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
	    flags);
	if (t->hftid_hash_tid == NULL) {
		/* Unwind the first allocation before failing. */
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
		return (ENOMEM);
	}

	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
	cv_init(&t->hftid_cv, "t4hfcv");

	return (0);
}
138
/*
 * Tear down everything allocated by alloc_hftid_hash: free any remaining
 * filter entries, both hash tables, and the lock/condvar.  Safe to call
 * even if the tables were never allocated.
 */
void
free_hftid_hash(struct tid_info *t)
{
	struct filter_entry *f, *ftmp;
	LIST_HEAD(, filter_entry) *head;
	int i;
#ifdef INVARIANTS
	int n = 0;	/* tids accounted for while walking the 4t hash */
#endif

	if (t->tids_in_use > 0) {
		/* Remove everything from the tid hash. */
		head = t->hftid_hash_tid;
		for (i = 0; i <= t->hftid_tid_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
				LIST_REMOVE(f, link_tid);
			}
		}

		/*
		 * Remove and then free each filter in the 4t hash.  Every
		 * entry is on this hash, so this pass frees all of them.
		 */
		head = t->hftid_hash_4t;
		for (i = 0; i <= t->hftid_4t_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
#ifdef INVARIANTS
				/* IPv6 filters consume 2 tids, IPv4 just 1. */
				n += f->fs.type ? 2 : 1;
#endif
				LIST_REMOVE(f, link_4t);
				free(f, M_CXGBE);
			}
		}
		MPASS(t->tids_in_use == n);
		t->tids_in_use = 0;
	}

	if (t->hftid_hash_4t) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
	}
	if (t->hftid_hash_tid) {
		hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
		t->hftid_hash_tid = NULL;
	}
	/* The mutex is initialized iff the hashes were ever allocated. */
	if (mtx_initialized(&t->hftid_lock)) {
		mtx_destroy(&t->hftid_lock);
		cv_destroy(&t->hftid_cv);
	}
}
186
187 static void
188 insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
189 {
190         struct tid_info *t = &sc->tids;
191         LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
192
193         MPASS(head != NULL);
194         if (hash == 0)
195                 hash = hf_hashfn_4t(&f->fs);
196         LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
197         atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
198 }
199
/*
 * Insert a filter into the tid hash.  The caller must hold hftid_lock and
 * must already have set f->tid (an absolute tid within this adapter's
 * [tid_base, tid_base + ntids) range).
 */
static void
insert_hftid(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	uint32_t hash;

	MPASS(f->tid >= t->tid_base);
	MPASS(f->tid - t->tid_base < t->ntids);
	mtx_assert(&t->hftid_lock, MA_OWNED);

	hash = hf_hashfn_tid(f->tid);
	LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
}
214
215 static bool
216 filter_eq(struct t4_filter_specification *fs1,
217     struct t4_filter_specification *fs2)
218 {
219         int n;
220
221         MPASS(fs1->hash && fs2->hash);
222
223         if (fs1->type != fs2->type)
224                 return (false);
225
226         n = fs1->type ? 16 : 4;
227         if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
228             bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
229             fs1->val.sport != fs2->val.sport ||
230             fs1->val.dport != fs2->val.dport)
231                 return (false);
232
233         /*
234          * We know the masks are the same because all hashfilter masks have to
235          * conform to the global tp->hash_filter_mask and the driver has
236          * verified that already.
237          */
238
239         if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
240             fs1->val.vnic != fs2->val.vnic)
241                 return (false);
242         if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
243                 return (false);
244         if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
245                 return (false);
246         if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
247                 return (false);
248         if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
249                 return (false);
250         if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
251                 return (false);
252         if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
253                 return (false);
254         if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
255                 return (false);
256         if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
257                 return (false);
258         if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
259                 return (false);
260
261         return (true);
262 }
263
264 static struct filter_entry *
265 lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
266 {
267         struct tid_info *t = &sc->tids;
268         LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
269         struct filter_entry *f;
270
271         mtx_assert(&t->hftid_lock, MA_OWNED);
272         MPASS(head != NULL);
273
274         if (hash == 0)
275                 hash = hf_hashfn_4t(fs);
276
277         LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
278                 if (filter_eq(&f->fs, fs))
279                         return (f);
280         }
281
282         return (NULL);
283 }
284
285 static struct filter_entry *
286 lookup_hftid(struct adapter *sc, int tid)
287 {
288         struct tid_info *t = &sc->tids;
289         LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
290         struct filter_entry *f;
291         uint32_t hash;
292
293         mtx_assert(&t->hftid_lock, MA_OWNED);
294         MPASS(head != NULL);
295
296         hash = hf_hashfn_tid(tid);
297         LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
298                 if (f->tid == tid)
299                         return (f);
300         }
301
302         return (NULL);
303 }
304
305 static void
306 remove_hf(struct adapter *sc, struct filter_entry *f)
307 {
308         struct tid_info *t = &sc->tids;
309
310         mtx_assert(&t->hftid_lock, MA_OWNED);
311
312         LIST_REMOVE(f, link_4t);
313         atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
314 }
315
/*
 * Remove a filter from the tid hash.  The caller must hold hftid_lock (only
 * asserted under INVARIANTS, hence the #ifdef around the locals).
 */
static void
remove_hftid(struct adapter *sc, struct filter_entry *f)
{
#ifdef INVARIANTS
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);
#endif

	LIST_REMOVE(f, link_tid);
}
327
328 static uint32_t
329 mode_to_fconf(uint32_t mode)
330 {
331         uint32_t fconf = 0;
332
333         if (mode & T4_FILTER_IP_FRAGMENT)
334                 fconf |= F_FRAGMENTATION;
335
336         if (mode & T4_FILTER_MPS_HIT_TYPE)
337                 fconf |= F_MPSHITTYPE;
338
339         if (mode & T4_FILTER_MAC_IDX)
340                 fconf |= F_MACMATCH;
341
342         if (mode & T4_FILTER_ETH_TYPE)
343                 fconf |= F_ETHERTYPE;
344
345         if (mode & T4_FILTER_IP_PROTO)
346                 fconf |= F_PROTOCOL;
347
348         if (mode & T4_FILTER_IP_TOS)
349                 fconf |= F_TOS;
350
351         if (mode & T4_FILTER_VLAN)
352                 fconf |= F_VLAN;
353
354         if (mode & T4_FILTER_VNIC)
355                 fconf |= F_VNIC_ID;
356
357         if (mode & T4_FILTER_PORT)
358                 fconf |= F_PORT;
359
360         if (mode & T4_FILTER_FCoE)
361                 fconf |= F_FCOE;
362
363         return (fconf);
364 }
365
366 static uint32_t
367 mode_to_iconf(uint32_t mode)
368 {
369
370         if (mode & T4_FILTER_IC_VNIC)
371                 return (F_VNIC);
372         return (0);
373 }
374
/*
 * Validate a filter specification against the adapter's current filter mode
 * (vlan_pri_map) and ingress config.  Builds the set of fconf bits the
 * specification needs and then checks that the hardware is configured to
 * match on all of them.  Returns 0, EINVAL (ingress VNIC config conflicts
 * with the requested vnic match type), or E2BIG (spec needs fconf bits the
 * current filter mode does not provide).
 */
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	/* outer VLAN match requires the VNIC field NOT be in PF/VF mode */
	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	/* PF/VF match requires the VNIC field to be in PF/VF mode */
	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	/* Every required fconf bit must already be in the filter mode. */
	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}
426
/*
 * Report the filter mode as a set of T4_FILTER_* flags in *mode.  A non-zero
 * incoming *mode asks for the hashfilter mode, which additionally requires
 * each field to be fully covered by the hardware's hash_filter_mask; a zero
 * incoming value reports the TCAM filter mode (mask check always passes).
 * Always returns 0.
 */
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t mask;

	/* Non-zero incoming value in mode means "hashfilter mode". */
	mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

	/* Always */
	*mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

/*
 * Report a field only if it is part of the filter mode AND (for hashfilters)
 * the hash mask covers the field completely.
 */
#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit)  do { \
	if (tp->vlan_pri_map & (fconf_bit)) { \
		MPASS(tp->field_shift >= 0); \
		if ((mask >> tp->field_shift & field_mask) == field_mask) \
		*mode |= (mode_bit); \
	} \
} while (0)

	CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
	CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
	CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
	CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
	CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
	CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
	CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
	CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID , T4_FILTER_VNIC);
	/* The meaning of the VNIC field depends on the ingress config. */
	if (tp->ingress_config & F_VNIC)
		*mode |= T4_FILTER_IC_VNIC;
	CHECK_FIELD(F_PORT, port_shift, M_FT_PORT , T4_FILTER_PORT);
	CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE , T4_FILTER_FCoE);
#undef CHECK_FIELD

	return (0);
}
464
/*
 * Set the adapter's TCAM filter mode from a T4_FILTER_* mode word.  Fails
 * with EBUSY if the requested ingress (VNIC) config differs from the current
 * one, if any TCAM filters are in use, or if TOM is active.
 */
int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
		 * thorougly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	/* Changing the mode would invalidate filters that are in use. */
	if (sc->tids.ftids_in_use > 0 || sc->tids.hpftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	/* t4_set_filter_mode returns a negative errno; flip the sign. */
	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
511
/*
 * Read a TCAM filter's hit count directly out of its TCB in adapter memory
 * (via memory window 0).  T4 stores a 64-bit big-endian count at TCB offset
 * 16; later chips store a 32-bit big-endian count at offset 24.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}
531
532 int
533 get_filter(struct adapter *sc, struct t4_filter *t)
534 {
535         if (t->fs.hash)
536                 return (get_hashfilter(sc, t));
537         else
538                 return (get_tcamfilter(sc, t));
539 }
540
/*
 * Install a TCAM filter at index t->idx.  Claims the filter slot(s), builds
 * a FW_FILTER_WR (or FW_FILTER2_WR when supported) describing the match and
 * actions, sends it on the control queue, and sleeps until the firmware
 * reply arrives.  On any failure before the WR is committed the l2te/smt
 * references passed in are released here; after commit, ownership has moved
 * into the filter_entry.  Returns 0, EPERM (slot locked), EBUSY (slot in
 * use), ENOMEM (no WR space), EIO (firmware rejected the filter), or
 * EINPROGRESS (interrupted while waiting; the WR is still outstanding).
 */
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	u_int tid;
	const int ntids = t->fs.type ? 4 : 1;	/* IPv6 uses 4 TCAM slots */

	MPASS(!t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	/* High-priority filters have their own region on T6+. */
	if (separate_hpfilter_region(sc) && t->fs.prio) {
		MPASS(t->idx < sc->tids.nhpftids);
		f = &sc->tids.hpftid_tab[t->idx];
		tid = sc->tids.hpftid_base + t->idx;
	} else {
		MPASS(t->idx < sc->tids.nftids);
		f = &sc->tids.ftid_tab[t->idx];
		tid = sc->tids.ftid_base + t->idx;
	}
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	/* All slots the filter would occupy must be free and unlocked. */
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			if (separate_hpfilter_region(sc) && t->fs.prio)
				sc->tids.hpftids_in_use++;
			else
				sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0) {
		/* Failed before the WR was committed; drop the references. */
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
		return (rc);
	}

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = tid;
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	/* The single OVLAN_VLD bit covers both PF/VF and outer-VLAN modes. */
	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	/* Firmware reply will be delivered to the default firmware queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	/* "local" in the WR is the destination, "foreign" the source. */
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
	bzero(fwr->sma, sizeof (fwr->sma));
	if (sc->params.filter2_wr_support) {
		/* Extra FILTER2 fields: swapmac and NAT rewrites. */
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		/* Interruptible sleep; the reply handler does the wakeup. */
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}
720
/*
 * Build the compressed filter tuple (ntuple) for a hashfilter from its
 * specification, placing it in *ftuple.  Each optional field is shifted
 * into the position the TP microcode expects (the *_shift values in
 * tp_params); a field participates only if its shift is valid (>= 0) and
 * its mask is non-zero.  Returns EINVAL if the resulting set of fields does
 * not exactly match the global hash filter mask, else 0.
 */
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
		fmask |= M_FT_VLAN << tp->vlan_shift;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= M_FT_PORT << tp->port_shift;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= M_FT_PROTOCOL << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= M_FT_TOS << tp->tos_shift;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* F_VNIC in ingress config was already validated. */
		if (tp->ingress_config & F_VNIC)
			MPASS(fs->mask.pfvf_vld);
		else
			MPASS(fs->mask.ovlan_vld);

		/* Bit 16 is the vld bit for the vnic/ovlan field. */
		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= M_FT_VNIC_ID << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= M_FT_MACMATCH << tp->macmatch_shift;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= M_FT_FCOE << tp->fcoe_shift;
	}

	/* A hashfilter must conform to the filterMask. */
	if (fmask != tp->hash_filter_mask)
		return (EINVAL);

	return (0);
}
796
797 static bool
798 is_4tuple_specified(struct t4_filter_specification *fs)
799 {
800         int i;
801         const int n = fs->type ? 16 : 4;
802
803         if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
804                 return (false);
805
806         for (i = 0; i < n; i++) {
807                 if (fs->mask.sip[i] != 0xff)
808                         return (false);
809                 if (fs->mask.dip[i] != 0xff)
810                         return (false);
811         }
812
813         return (true);
814 }
815
/*
 * Program the filter described by 't'.  Hashfilters are sent to the hardware
 * hash table (the hardware picks the tid); all other filters go into the TCAM
 * (hp region or normal region) at the caller-supplied t->idx.  May sleep.
 * Returns 0 or an errno.
 */
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te;
	struct smt_entry *smt;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		/* Hashfilters must match on the entire 4-tuple. */
		if (!is_4tuple_specified(&t->fs))
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (separate_hpfilter_region(sc) && t->fs.prio) {
			if (ti->nhpftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nhpftids)
				return (EINVAL);
		} else {
			if (ti->nftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nftids)
				return (EINVAL);
		}
		/* IPv6 filter idx must be 4 aligned */
		/*
		 * NOTE(review): this bound uses ti->nftids even when the
		 * filter was range-checked against ti->nhpftids above (hp
		 * region) -- confirm that is intentional.
		 */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		/* Lazily allocate the 4-tuple hash and the atid table. */
		if (__predict_false(ti->hftid_hash_4t == NULL)) {
			rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
			if (rc != 0)
				goto done;
		}
		if (__predict_false(sc->tids.atid_tab == NULL)) {
			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (separate_hpfilter_region(sc) && t->fs.prio &&
	    __predict_false(ti->hpftid_tab == NULL)) {
		/* Lazily allocate the hp-region filter table. */
		MPASS(ti->nhpftids != 0);
		KASSERT(ti->hpftids_in_use == 0,
		    ("%s: no memory allocated but hpftids_in_use is %u",
		    __func__, ti->hpftids_in_use));
		ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
		    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->hpftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		/* ftid_lock/ftid_cv are shared by the hp and normal tables. */
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		/* Lazily allocate the normal-region filter table. */
		MPASS(ti->nftids != 0);
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use is %u",
		    __func__, ti->ftids_in_use));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	l2te = NULL;
	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc->l2t);
		if (__predict_false(l2te == NULL))
			return (EAGAIN);
		rc = t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
		    t->fs.dmac);
		if (rc) {
			t4_l2t_release(l2te);
			return (ENOMEM);
		}
	}

	smt = NULL;
	if (t->fs.newsmac) {
		/* This filter needs an SMT entry; allocate one. */
		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
		if (__predict_false(smt == NULL)) {
			if (l2te != NULL)
				t4_l2t_release(l2te);
			return (EAGAIN);
		}
		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
		if (rc) {
			t4_smt_release(smt);
			if (l2te != NULL)
				t4_l2t_release(l2te);
			return (rc);
		}
	}

	/* Ownership of l2te/smt passes to the set_* routine from here on. */
	if (t->fs.hash)
		return (set_hashfilter(sc, t, ftuple, l2te, smt));
	else
		return (set_tcamfilter(sc, t, l2te, smt));

}
983
/*
 * Delete the TCAM filter at t->idx (hp region if prio is set and the chip has
 * a separate hp region).  Sends a filter-delete work request and sleeps until
 * the firmware reply (t4_filter_rpl) clears the pending flag.  Returns 0, an
 * errno, or EINPROGRESS if the sleep was interrupted by a signal.
 */
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc, nfilters;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	mtx_lock(&sc->tids.ftid_lock);
	/* Select the hp or normal filter table. */
	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}
	MPASS(f != NULL);	/* Caller checked this. */
	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}
	f += t->idx;

	if (f->locked) {
		/* An ioctl thread still holds a reference on this entry. */
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		/* A set or delete is already in flight for this entry. */
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		/* Nothing programmed at this index. */
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == tid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	/* Wait for t4_filter_rpl to process the firmware's reply. */
	for (;;) {
		if (f->pending == 0) {
			/* valid should have been cleared on success. */
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}
1055
1056 int
1057 del_filter(struct adapter *sc, struct t4_filter *t)
1058 {
1059
1060         /* No filters possible if not initialized yet. */
1061         if (!(sc->flags & FULL_INIT_DONE))
1062                 return (EINVAL);
1063
1064         /*
1065          * The checks for tid tables ensure that the locks that del_* will reach
1066          * for are initialized.
1067          */
1068         if (t->fs.hash) {
1069                 if (sc->tids.hftid_hash_4t != NULL)
1070                         return (del_hashfilter(sc, t));
1071         } else if (separate_hpfilter_region(sc) && t->fs.prio) {
1072                 if (sc->tids.hpftid_tab != NULL)
1073                         return (del_tcamfilter(sc, t));
1074         } else {
1075                 if (sc->tids.ftid_tab != NULL)
1076                         return (del_tcamfilter(sc, t));
1077         }
1078
1079         return (EINVAL);
1080 }
1081
1082 /*
1083  * Release secondary resources associated with the filter.
1084  */
1085 static void
1086 free_filter_resources(struct filter_entry *f)
1087 {
1088
1089         if (f->l2te) {
1090                 t4_l2t_release(f->l2te);
1091                 f->l2te = NULL;
1092         }
1093         if (f->smt) {
1094                 t4_smt_release(f->smt);
1095                 f->smt = NULL;
1096         }
1097 }
1098
1099 static int
1100 set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
1101     uint64_t val, int no_reply)
1102 {
1103         struct wrq_cookie cookie;
1104         struct cpl_set_tcb_field *req;
1105
1106         req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
1107         if (req == NULL)
1108                 return (ENOMEM);
1109         bzero(req, sizeof(*req));
1110         INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
1111         if (no_reply == 0) {
1112                 req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
1113                     V_NO_REPLY(0));
1114         } else
1115                 req->reply_ctrl = htobe16(V_NO_REPLY(1));
1116         req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
1117         req->mask = htobe64(mask);
1118         req->val = htobe64(val);
1119         commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
1120
1121         return (0);
1122 }
1123
1124 /* Set one of the t_flags bits in the TCB. */
1125 static inline int
1126 set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
1127     u_int no_reply)
1128 {
1129
1130         return (set_tcb_field(sc, tid,  W_TCB_T_FLAGS, 1ULL << bit_pos,
1131             (uint64_t)val << bit_pos, no_reply));
1132 }
1133
/*
 * Handler for firmware replies to TCAM filter work requests (set or delete).
 * Updates the filter entry's state and wakes up any thread sleeping in
 * set_tcamfilter/del_tcamfilter.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));


	/* Locate the filter entry for this tid (hp region or normal). */
	if (is_hpftid(sc, tid)) {
		idx = tid - sc->tids.hpftid_base;
		f = &sc->tids.hpftid_tab[idx];
	} else if (is_ftid(sc, tid)) {
		idx = tid - sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
	} else
		panic("%s: FW reply for invalid TID %d.", __func__, tid);

	MPASS(f->tid == tid);
	rc = G_COOKIE(rpl->cookie);	/* FW_FILTER_WR_* status code */

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, tid));
	switch(rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		if (f->fs.newsmac) {
			/* Source MAC rewrite needs follow-up TCB updates. */
			MPASS(f->smt != NULL);
			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
			    V_TCB_SMAC_SEL(f->smt->idx), 1);
			/* XXX: wait for reply to TCB update before !pending */
		}
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		/* Fall through */
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		if (separate_hpfilter_region(sc) && f->fs.prio)
			sc->tids.hpftids_in_use--;
		else
			sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);	/* wake the ioctl thread */
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}
1201
1202 /*
1203  * This is the reply to the Active Open that created the filter.  Additional TCB
1204  * updates may be required to complete the filter configuration.
1205  */
1206 int
1207 t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
1208     struct mbuf *m)
1209 {
1210         struct adapter *sc = iq->adapter;
1211         const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
1212         u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
1213         u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
1214         struct filter_entry *f = lookup_atid(sc, atid);
1215
1216         KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1217
1218         mtx_lock(&sc->tids.hftid_lock);
1219         KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
1220         KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
1221             __func__, f, f->tid));
1222         if (status == CPL_ERR_NONE) {
1223                 f->tid = GET_TID(cpl);
1224                 MPASS(lookup_hftid(sc, f->tid) == NULL);
1225                 insert_hftid(sc, f);
1226                 /*
1227                  * Leave the filter pending until it is fully set up, which will
1228                  * be indicated by the reply to the last TCB update.  No need to
1229                  * unblock the ioctl thread either.
1230                  */
1231                 if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
1232                         goto done;
1233                 f->valid = 1;
1234                 f->pending = 0;
1235         } else {
1236                 /* provide errno instead of tid to ioctl */
1237                 f->tid = act_open_rpl_status_to_errno(status);
1238                 f->valid = 0;
1239                 if (act_open_has_tid(status))
1240                         release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
1241                 free_filter_resources(f);
1242                 remove_hf(sc, f);
1243                 if (f->locked == 0)
1244                         free(f, M_CXGBE);
1245         }
1246         cv_broadcast(&sc->tids.hftid_cv);
1247 done:
1248         mtx_unlock(&sc->tids.hftid_lock);
1249
1250         free_atid(sc, atid);
1251         return (0);
1252 }
1253
/*
 * Reply to the final TCB update for a hashfilter being set up.  A successful
 * reply completes the filter; a failure tears it down and records an errno in
 * f->tid for the ioctl thread.
 */
int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));
	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = EIO;
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		/* If no ioctl thread holds a reference, free the entry now. */
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}
1288
/*
 * Reply to the abort that deletes a hashfilter.  On success the filter's
 * resources are released, the tid is returned to the hardware, and the entry
 * is freed unless an ioctl thread still holds a reference.
 */
int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));
	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		/* If no ioctl thread holds a reference, free the entry now. */
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}
1320
1321 static int
1322 get_tcamfilter(struct adapter *sc, struct t4_filter *t)
1323 {
1324         int i, nfilters;
1325         struct filter_entry *f;
1326         u_int in_use;
1327 #ifdef INVARIANTS
1328         u_int tid_base;
1329 #endif
1330
1331         MPASS(!t->fs.hash);
1332
1333         if (separate_hpfilter_region(sc) && t->fs.prio) {
1334                 nfilters = sc->tids.nhpftids;
1335                 f = sc->tids.hpftid_tab;
1336                 in_use = sc->tids.hpftids_in_use;
1337 #ifdef INVARIANTS
1338                 tid_base = sc->tids.hpftid_base;
1339 #endif
1340         } else {
1341                 nfilters = sc->tids.nftids;
1342                 f = sc->tids.ftid_tab;
1343                 in_use = sc->tids.ftids_in_use;
1344 #ifdef INVARIANTS
1345                 tid_base = sc->tids.ftid_base;
1346 #endif
1347         }
1348
1349         if (in_use == 0 || f == NULL || t->idx >= nfilters) {
1350                 t->idx = 0xffffffff;
1351                 return (0);
1352         }
1353
1354         f += t->idx;
1355         mtx_lock(&sc->tids.ftid_lock);
1356         for (i = t->idx; i < nfilters; i++, f++) {
1357                 if (f->valid) {
1358                         MPASS(f->tid == tid_base + i);
1359                         t->idx = i;
1360                         t->l2tidx = f->l2te ? f->l2te->idx : 0;
1361                         t->smtidx = f->smt ? f->smt->idx : 0;
1362                         if (f->fs.hitcnts)
1363                                 t->hits = get_filter_hits(sc, f->tid);
1364                         else
1365                                 t->hits = UINT64_MAX;
1366                         t->fs = f->fs;
1367
1368                         goto done;
1369                 }
1370         }
1371         t->idx = 0xffffffff;
1372 done:
1373         mtx_unlock(&sc->tids.ftid_lock);
1374         return (0);
1375 }
1376
1377 static int
1378 get_hashfilter(struct adapter *sc, struct t4_filter *t)
1379 {
1380         struct tid_info *ti = &sc->tids;
1381         int tid;
1382         struct filter_entry *f;
1383         const int inv_tid = ti->ntids + ti->tid_base;
1384
1385         MPASS(t->fs.hash);
1386
1387         if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
1388             t->idx >= inv_tid) {
1389                 t->idx = 0xffffffff;
1390                 return (0);
1391         }
1392         if (t->idx < ti->tid_base)
1393                 t->idx = ti->tid_base;
1394
1395         mtx_lock(&ti->hftid_lock);
1396         for (tid = t->idx; tid < inv_tid; tid++) {
1397                 f = lookup_hftid(sc, tid);
1398                 if (f != NULL && f->valid) {
1399                         t->idx = tid;
1400                         t->l2tidx = f->l2te ? f->l2te->idx : 0;
1401                         t->smtidx = f->smt ? f->smt->idx : 0;
1402                         if (f->fs.hitcnts)
1403                                 t->hits = get_filter_hits(sc, tid);
1404                         else
1405                                 t->hits = UINT64_MAX;
1406                         t->fs = f->fs;
1407
1408                         goto done;
1409                 }
1410         }
1411         t->idx = 0xffffffff;
1412 done:
1413         mtx_unlock(&ti->hftid_lock);
1414         return (0);
1415 }
1416
/*
 * Fill out an IPv6 active-open request that creates a hashfilter (T5/T6
 * layouts; the T4-specific fields are not used).  The filter tuple and the
 * filter's actions are encoded into the CPL's opt0/opt2/params fields.
 */
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	/*
	 * NOTE(review): the uint64_t loads assume fs.val.dip/sip are 16 bytes
	 * each, already in the wire byte order, and suitably aligned for a
	 * 64-bit access -- confirm against the t4_filter_specification layout.
	 */
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}
1460
1461 static void
1462 mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
1463     uint64_t ftuple, struct cpl_act_open_req *cpl)
1464 {
1465         struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
1466         struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
1467
1468         /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1469         MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1470         MPASS(atid >= 0);
1471
1472         if (chip_id(sc) == CHELSIO_T5) {
1473                 INIT_TP_WR(cpl5, 0);
1474         } else {
1475                 INIT_TP_WR(cpl6, 0);
1476                 cpl6->rsvd2 = 0;
1477                 cpl6->opt3 = 0;
1478         }
1479
1480         OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
1481             V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1482             V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1483         cpl->local_port = htobe16(f->fs.val.dport);
1484         cpl->peer_port = htobe16(f->fs.val.sport);
1485         cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
1486             f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
1487         cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
1488                 f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
1489         cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1490             f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1491             V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1492             V_NO_CONG(f->fs.rpttid) |
1493             V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1494             F_TCAM_BYPASS | F_NON_OFFLOAD);
1495
1496         cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1497         cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1498             V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1499             V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1500             F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1501             V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1502             V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
1503 }
1504
1505 static int
1506 act_open_cpl_len16(struct adapter *sc, int isipv6)
1507 {
1508         int idx;
1509         static const int sz_table[3][2] = {
1510                 {
1511                         howmany(sizeof (struct cpl_act_open_req), 16),
1512                         howmany(sizeof (struct cpl_act_open_req6), 16)
1513                 },
1514                 {
1515                         howmany(sizeof (struct cpl_t5_act_open_req), 16),
1516                         howmany(sizeof (struct cpl_t5_act_open_req6), 16)
1517                 },
1518                 {
1519                         howmany(sizeof (struct cpl_t6_act_open_req), 16),
1520                         howmany(sizeof (struct cpl_t6_act_open_req6), 16)
1521                 },
1522         };
1523
1524         MPASS(chip_id(sc) >= CHELSIO_T4);
1525         idx = min(chip_id(sc) - CHELSIO_T4, 2);
1526
1527         return (sz_table[idx][!!isipv6]);
1528 }
1529
1530 static int
1531 set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
1532     struct l2t_entry *l2te, struct smt_entry *smt)
1533 {
1534         void *wr;
1535         struct wrq_cookie cookie;
1536         struct filter_entry *f;
1537         int rc, atid = -1;
1538         uint32_t hash;
1539
1540         MPASS(t->fs.hash);
1541         /* Already validated against fconf, iconf */
1542         MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
1543         MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
1544
1545         hash = hf_hashfn_4t(&t->fs);
1546
1547         mtx_lock(&sc->tids.hftid_lock);
1548         if (lookup_hf(sc, &t->fs, hash) != NULL) {
1549                 rc = EEXIST;
1550                 goto done;
1551         }
1552
1553         f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
1554         if (__predict_false(f == NULL)) {
1555                 if (l2te)
1556                         t4_l2t_release(l2te);
1557                 if (smt)
1558                         t4_smt_release(smt);
1559                 rc = ENOMEM;
1560                 goto done;
1561         }
1562         f->fs = t->fs;
1563         f->l2te = l2te;
1564         f->smt = smt;
1565
1566         atid = alloc_atid(sc, f);
1567         if (__predict_false(atid) == -1) {
1568                 if (l2te)
1569                         t4_l2t_release(l2te);
1570                 if (smt)
1571                         t4_smt_release(smt);
1572                 free(f, M_CXGBE);
1573                 rc = EAGAIN;
1574                 goto done;
1575         }
1576         MPASS(atid >= 0);
1577
1578         wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
1579             &cookie);
1580         if (wr == NULL) {
1581                 free_atid(sc, atid);
1582                 if (l2te)
1583                         t4_l2t_release(l2te);
1584                 if (smt)
1585                         t4_smt_release(smt);
1586                 free(f, M_CXGBE);
1587                 rc = ENOMEM;
1588                 goto done;
1589         }
1590         if (f->fs.type)
1591                 mk_act_open_req6(sc, f, atid, ftuple, wr);
1592         else
1593                 mk_act_open_req(sc, f, atid, ftuple, wr);
1594
1595         f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
1596         f->pending = 1;
1597         f->tid = -1;
1598         insert_hf(sc, f, hash);
1599         commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1600
1601         for (;;) {
1602                 MPASS(f->locked);
1603                 if (f->pending == 0) {
1604                         if (f->valid) {
1605                                 rc = 0;
1606                                 f->locked = 0;
1607                                 t->idx = f->tid;
1608                         } else {
1609                                 remove_hf(sc, f);
1610                                 rc = f->tid;
1611                                 free(f, M_CXGBE);
1612                         }
1613                         break;
1614                 }
1615                 if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1616                         f->locked = 0;
1617                         rc = EINPROGRESS;
1618                         break;
1619                 }
1620         }
1621 done:
1622         mtx_unlock(&sc->tids.hftid_lock);
1623         return (rc);
1624 }
1625
1626 /* SET_TCB_FIELD sent as a ULP command looks like this */
1627 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
1628     sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
1629
1630 static void *
1631 mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
1632                 uint64_t val, uint32_t tid, uint32_t qid)
1633 {
1634         struct ulptx_idata *ulpsc;
1635         struct cpl_set_tcb_field_core *req;
1636
1637         ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1638         ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
1639
1640         ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1641         ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1642         ulpsc->len = htobe32(sizeof(*req));
1643
1644         req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
1645         OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1646         req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
1647         req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
1648         req->mask = htobe64(mask);
1649         req->val = htobe64(val);
1650
1651         ulpsc = (struct ulptx_idata *)(req + 1);
1652         if (LEN__SET_TCB_FIELD_ULP % 16) {
1653                 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1654                 ulpsc->len = htobe32(0);
1655                 return (ulpsc + 1);
1656         }
1657         return (ulpsc);
1658 }
1659
1660 /* ABORT_REQ sent as a ULP command looks like this */
1661 #define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
1662         sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))
1663
1664 static void *
1665 mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1666 {
1667         struct ulptx_idata *ulpsc;
1668         struct cpl_abort_req_core *req;
1669
1670         ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1671         ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));
1672
1673         ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1674         ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1675         ulpsc->len = htobe32(sizeof(*req));
1676
1677         req = (struct cpl_abort_req_core *)(ulpsc + 1);
1678         OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
1679         req->rsvd0 = htonl(0);
1680         req->rsvd1 = 0;
1681         req->cmd = CPL_ABORT_NO_RST;
1682
1683         ulpsc = (struct ulptx_idata *)(req + 1);
1684         if (LEN__ABORT_REQ_ULP % 16) {
1685                 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1686                 ulpsc->len = htobe32(0);
1687                 return (ulpsc + 1);
1688         }
1689         return (ulpsc);
1690 }
1691
1692 /* ABORT_RPL sent as a ULP command looks like this */
1693 #define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
1694         sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))
1695
1696 static void *
1697 mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1698 {
1699         struct ulptx_idata *ulpsc;
1700         struct cpl_abort_rpl_core *rpl;
1701
1702         ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1703         ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));
1704
1705         ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1706         ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1707         ulpsc->len = htobe32(sizeof(*rpl));
1708
1709         rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
1710         OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
1711         rpl->rsvd0 = htonl(0);
1712         rpl->rsvd1 = 0;
1713         rpl->cmd = CPL_ABORT_NO_RST;
1714
1715         ulpsc = (struct ulptx_idata *)(rpl + 1);
1716         if (LEN__ABORT_RPL_ULP % 16) {
1717                 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1718                 ulpsc->len = htobe32(0);
1719                 return (ulpsc + 1);
1720         }
1721         return (ulpsc);
1722 }
1723
1724 static inline int
1725 del_hashfilter_wrlen(void)
1726 {
1727
1728         return (sizeof(struct work_request_hdr) +
1729             roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
1730             roundup2(LEN__ABORT_REQ_ULP, 16) +
1731             roundup2(LEN__ABORT_RPL_ULP, 16));
1732 }
1733
1734 static void
1735 mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
1736 {
1737         struct ulp_txpkt *ulpmc;
1738
1739         INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
1740         ulpmc = (struct ulp_txpkt *)(wrh + 1);
1741         ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
1742             V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
1743         ulpmc = mk_abort_req_ulp(ulpmc, tid);
1744         ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
1745 }
1746
1747 static int
1748 del_hashfilter(struct adapter *sc, struct t4_filter *t)
1749 {
1750         struct tid_info *ti = &sc->tids;
1751         void *wr;
1752         struct filter_entry *f;
1753         struct wrq_cookie cookie;
1754         int rc;
1755         const int wrlen = del_hashfilter_wrlen();
1756         const int inv_tid = ti->ntids + ti->tid_base;
1757
1758         MPASS(sc->tids.hftid_hash_4t != NULL);
1759         MPASS(sc->tids.ntids > 0);
1760
1761         if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
1762                 return (EINVAL);
1763
1764         mtx_lock(&ti->hftid_lock);
1765         f = lookup_hftid(sc, t->idx);
1766         if (f == NULL || f->valid == 0) {
1767                 rc = EINVAL;
1768                 goto done;
1769         }
1770         MPASS(f->tid == t->idx);
1771         if (f->locked) {
1772                 rc = EPERM;
1773                 goto done;
1774         }
1775         if (f->pending) {
1776                 rc = EBUSY;
1777                 goto done;
1778         }
1779         wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
1780         if (wr == NULL) {
1781                 rc = ENOMEM;
1782                 goto done;
1783         }
1784
1785         mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
1786         f->locked = 1;
1787         f->pending = 1;
1788         commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1789         t->fs = f->fs;  /* extra info for the caller */
1790
1791         for (;;) {
1792                 MPASS(f->locked);
1793                 if (f->pending == 0) {
1794                         if (f->valid) {
1795                                 f->locked = 0;
1796                                 rc = EIO;
1797                         } else {
1798                                 rc = 0;
1799                                 free(f, M_CXGBE);
1800                         }
1801                         break;
1802                 }
1803                 if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
1804                         f->locked = 0;
1805                         rc = EINPROGRESS;
1806                         break;
1807                 }
1808         }
1809 done:
1810         mtx_unlock(&ti->hftid_lock);
1811         return (rc);
1812 }
1813
1814 #define WORD_MASK       0xffffffff
1815 static void
1816 set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
1817     const bool sip, const bool dp, const bool sp)
1818 {
1819
1820         if (dip) {
1821                 if (f->fs.type) {
1822                         set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
1823                             f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
1824                             f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);
1825
1826                         set_tcb_field(sc, f->tid,
1827                             W_TCB_SND_UNA_RAW + 1, WORD_MASK,
1828                             f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
1829                             f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);
1830
1831                         set_tcb_field(sc, f->tid,
1832                             W_TCB_SND_UNA_RAW + 2, WORD_MASK,
1833                             f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
1834                             f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);
1835
1836                         set_tcb_field(sc, f->tid,
1837                             W_TCB_SND_UNA_RAW + 3, WORD_MASK,
1838                             f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1839                             f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1840                 } else {
1841                         set_tcb_field(sc, f->tid,
1842                             W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
1843                             f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1844                             f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1845                 }
1846         }
1847
1848         if (sip) {
1849                 if (f->fs.type) {
1850                         set_tcb_field(sc, f->tid,
1851                             W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
1852                             f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
1853                             f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);
1854
1855                         set_tcb_field(sc, f->tid,
1856                             W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
1857                             f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
1858                             f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);
1859
1860                         set_tcb_field(sc, f->tid,
1861                             W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
1862                             f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
1863                             f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);
1864
1865                         set_tcb_field(sc, f->tid,
1866                             W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
1867                             f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1868                             f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1869
1870                 } else {
1871                         set_tcb_field(sc, f->tid,
1872                             W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
1873                             f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1874                             f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1875                 }
1876         }
1877
1878         set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
1879             (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
1880 }
1881
/*
 * Send the series of TCB updates that finishes the setup of hashfilter 'f'
 * (MAC/VLAN rewrites, NAT parameters, drop/switch behavior, hit counters).
 *
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.  Returns 0 if no update was needed
 * (filter is usable immediately).
 *
 * Caller must have the filter marked pending and not yet valid.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
        int updated = 0;        /* # of updates sent without a reply so far */

        MPASS(f->tid < sc->tids.ntids);
        MPASS(f->fs.hash);
        MPASS(f->pending);
        MPASS(f->valid == 0);

        /* Destination MAC rewrite is enabled via the TF_CCTRL_ECE tflag. */
        if (f->fs.newdmac) {
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
                updated++;
        }

        /* VLAN insert/rewrite is enabled via the TF_CCTRL_RFR tflag. */
        if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
                updated++;
        }

        /*
         * Source MAC rewrite: enable via TF_CCTRL_CWR and point the TCB at
         * the SMT entry already allocated for this filter.
         */
        if (f->fs.newsmac) {
                MPASS(f->smt != NULL);
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
                set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
                    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
                    1);
                updated++;
        }

        /* Program whichever combination of NAT rewrites the mode calls for. */
        switch(f->fs.nat_mode) {
        case NAT_MODE_NONE:
                break;
        case NAT_MODE_DIP:
                set_nat_params(sc, f, true, false, false, false);
                updated++;
                break;
        case NAT_MODE_DIP_DP:
                set_nat_params(sc, f, true, false, true, false);
                updated++;
                break;
        case NAT_MODE_DIP_DP_SIP:
                set_nat_params(sc, f, true, true, true, false);
                updated++;
                break;
        case NAT_MODE_DIP_DP_SP:
                set_nat_params(sc, f, true, false, true, true);
                updated++;
                break;
        case NAT_MODE_SIP_SP:
                set_nat_params(sc, f, false, true, false, true);
                updated++;
                break;
        case NAT_MODE_DIP_SIP_SP:
                set_nat_params(sc, f, true, true, false, true);
                updated++;
                break;
        case NAT_MODE_ALL:
                set_nat_params(sc, f, true, true, true, true);
                updated++;
                break;
        default:
                MPASS(0);       /* should have been validated earlier */
                break;

        }

        /* Optional sequence-number check applied to NATed connections. */
        if (f->fs.nat_seq_chk) {
                set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
                    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
                    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
                updated++;
        }

        if (is_t5(sc) && f->fs.action == FILTER_DROP) {
                /*
                 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
                 */
                set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
                    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
                updated++;
        }

        /*
         * Enable switching after all secondary resources (L2T entry, SMT entry,
         * etc.) are setup so that any switched packet will use correct
         * values.
         */
        if (f->fs.action == FILTER_SWITCH) {
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
                updated++;
        }

        /*
         * Final update: zero the timestamp/age words (needed for hit counts)
         * and, unlike all the updates above, request a reply (last arg 0) so
         * the driver learns when the filter is fully programmed.
         */
        if (f->fs.hitcnts || updated > 0) {
                set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
                    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
                    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
                return (EINPROGRESS);
        }

        return (0);
}