/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/fnv_hash.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"

struct filter_entry {
        LIST_ENTRY(filter_entry) link_4t;
        LIST_ENTRY(filter_entry) link_tid;

        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked or busy */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        int tid;                /* tid of the filter TCB */
        struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */
        struct smt_entry *smt;  /* SMT entry for SMAC rewrite */

        struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

static inline bool
separate_hpfilter_region(struct adapter *sc)
{

        return (chip_id(sc) >= CHELSIO_T6);
}

static inline uint32_t
hf_hashfn_4t(struct t4_filter_specification *fs)
{
        struct t4_filter_tuple *ft = &fs->val;
        uint32_t hash;

        if (fs->type) {
                /* IPv6 */
                hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
                hash = fnv_32_buf(&ft->dip[0], 16, hash);
        } else {
                /* IPv4 */
                hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
                hash = fnv_32_buf(&ft->dip[0], 4, hash);
        }
        hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
        hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);

        return (hash);
}

static inline uint32_t
hf_hashfn_tid(int tid)
{

        return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
}

static int
alloc_hftid_hash(struct tid_info *t, int flags)
{
        int n;

        MPASS(t->ntids > 0);
        MPASS(t->hftid_hash_4t == NULL);
        MPASS(t->hftid_hash_tid == NULL);

        n = max(t->ntids / 1024, 16);
        t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
        if (t->hftid_hash_4t == NULL)
                return (ENOMEM);
        t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
            flags);
        if (t->hftid_hash_tid == NULL) {
                hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
                t->hftid_hash_4t = NULL;
                return (ENOMEM);
        }

        mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
        cv_init(&t->hftid_cv, "t4hfcv");

        return (0);
}
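
/*
 * Editor's note, an illustrative sketch rather than driver code: a filter's
 * home bucket in the 4-tuple hash is the FNV-1 hash of its tuple masked with
 * hftid_4t_mask, e.g.:
 *
 *	uint32_t hash = hf_hashfn_4t(&f->fs);
 *	LIST_HEAD(, filter_entry) *bucket =
 *	    &t->hftid_hash_4t[hash & t->hftid_4t_mask];
 *
 * hashinit_flags() allocates a power-of-two number of buckets for the
 * requested size (ntids / 1024, but at least 16), so the returned mask is
 * always of the form 2^n - 1.
 */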

void
free_hftid_hash(struct tid_info *t)
{
        struct filter_entry *f, *ftmp;
        LIST_HEAD(, filter_entry) *head;
        int i;
#ifdef INVARIANTS
        int n = 0;
#endif

        if (t->tids_in_use > 0) {
                /* Remove everything from the tid hash. */
                head = t->hftid_hash_tid;
                for (i = 0; i <= t->hftid_tid_mask; i++) {
                        LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
                                LIST_REMOVE(f, link_tid);
                        }
                }

                /* Remove and then free each filter in the 4t hash. */
                head = t->hftid_hash_4t;
                for (i = 0; i <= t->hftid_4t_mask; i++) {
                        LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
#ifdef INVARIANTS
                                n += f->fs.type ? 2 : 1;
#endif
                                LIST_REMOVE(f, link_4t);
                                free(f, M_CXGBE);
                        }
                }
                MPASS(t->tids_in_use == n);
                t->tids_in_use = 0;
        }

        if (t->hftid_hash_4t) {
                hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
                t->hftid_hash_4t = NULL;
        }
        if (t->hftid_hash_tid) {
                hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
                t->hftid_hash_tid = NULL;
        }
        if (mtx_initialized(&t->hftid_lock)) {
                mtx_destroy(&t->hftid_lock);
                cv_destroy(&t->hftid_cv);
        }
}

static void
insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
{
        struct tid_info *t = &sc->tids;
        LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;

        MPASS(head != NULL);
        if (hash == 0)
                hash = hf_hashfn_4t(&f->fs);
        LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
        atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
insert_hftid(struct adapter *sc, struct filter_entry *f)
{
        struct tid_info *t = &sc->tids;
        LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
        uint32_t hash;

        MPASS(f->tid >= t->tid_base);
        MPASS(f->tid - t->tid_base < t->ntids);
        mtx_assert(&t->hftid_lock, MA_OWNED);

        hash = hf_hashfn_tid(f->tid);
        LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
}

static bool
filter_eq(struct t4_filter_specification *fs1,
    struct t4_filter_specification *fs2)
{
        int n;

        MPASS(fs1->hash && fs2->hash);

        if (fs1->type != fs2->type)
                return (false);

        n = fs1->type ? 16 : 4;
        if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
            bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
            fs1->val.sport != fs2->val.sport ||
            fs1->val.dport != fs2->val.dport)
                return (false);

        /*
         * We know the masks are the same because all hashfilters conform to
         * the global tp->filter_mask and the driver has verified that already.
         */

        if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
            fs1->val.vnic != fs2->val.vnic)
                return (false);
        if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
                return (false);
        if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
                return (false);
        if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
                return (false);
        if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
                return (false);
        if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
                return (false);
        if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
                return (false);
        if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
                return (false);
        if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
                return (false);
        if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
                return (false);

        return (true);
}

static struct filter_entry *
lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
{
        struct tid_info *t = &sc->tids;
        LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
        struct filter_entry *f;

        mtx_assert(&t->hftid_lock, MA_OWNED);
        MPASS(head != NULL);

        if (hash == 0)
                hash = hf_hashfn_4t(fs);

        LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
                if (filter_eq(&f->fs, fs))
                        return (f);
        }

        return (NULL);
}

static struct filter_entry *
lookup_hftid(struct adapter *sc, int tid)
{
        struct tid_info *t = &sc->tids;
        LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
        struct filter_entry *f;
        uint32_t hash;

        mtx_assert(&t->hftid_lock, MA_OWNED);
        MPASS(head != NULL);

        hash = hf_hashfn_tid(tid);
        LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
                if (f->tid == tid)
                        return (f);
        }

        return (NULL);
}

static void
remove_hf(struct adapter *sc, struct filter_entry *f)
{
        struct tid_info *t = &sc->tids;

        mtx_assert(&t->hftid_lock, MA_OWNED);

        LIST_REMOVE(f, link_4t);
        atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
remove_hftid(struct adapter *sc, struct filter_entry *f)
{
#ifdef INVARIANTS
        struct tid_info *t = &sc->tids;

        mtx_assert(&t->hftid_lock, MA_OWNED);
#endif

        LIST_REMOVE(f, link_tid);
}

/*
 * Input: driver's 32b filter mode.
 * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
 */
static uint16_t
mode_to_fconf(uint32_t mode)
{
        uint32_t fconf = 0;

        if (mode & T4_FILTER_IP_FRAGMENT)
                fconf |= F_FRAGMENTATION;

        if (mode & T4_FILTER_MPS_HIT_TYPE)
                fconf |= F_MPSHITTYPE;

        if (mode & T4_FILTER_MAC_IDX)
                fconf |= F_MACMATCH;

        if (mode & T4_FILTER_ETH_TYPE)
                fconf |= F_ETHERTYPE;

        if (mode & T4_FILTER_IP_PROTO)
                fconf |= F_PROTOCOL;

        if (mode & T4_FILTER_IP_TOS)
                fconf |= F_TOS;

        if (mode & T4_FILTER_VLAN)
                fconf |= F_VLAN;

        if (mode & T4_FILTER_VNIC)
                fconf |= F_VNIC_ID;

        if (mode & T4_FILTER_PORT)
                fconf |= F_PORT;

        if (mode & T4_FILTER_FCoE)
                fconf |= F_FCOE;

        return (fconf);
}
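
/*
 * Example (editor's sketch): a driver mode requesting VLAN and ingress-port
 * matching maps to exactly the two corresponding hardware bits:
 *
 *	uint16_t fconf = mode_to_fconf(T4_FILTER_VLAN | T4_FILTER_PORT);
 *	MPASS(fconf == (F_VLAN | F_PORT));
 */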

/*
 * Input: driver's 32b filter mode.
 * Returns: hardware vnic mode (ingress config) matching the input.
 */
static int
mode_to_iconf(uint32_t mode)
{
        if ((mode & T4_FILTER_VNIC) == 0)
                return (-1);    /* ingress config doesn't matter. */

        if (mode & T4_FILTER_IC_VNIC)
                return (FW_VNIC_MODE_PF_VF);
        else if (mode & T4_FILTER_IC_ENCAP)
                return (FW_VNIC_MODE_ENCAP_EN);
        else
                return (FW_VNIC_MODE_OUTER_VLAN);
}

static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
        struct tp_params *tpp = &sc->params.tp;
        uint32_t fconf = 0;

        if (fs->val.frag || fs->mask.frag)
                fconf |= F_FRAGMENTATION;

        if (fs->val.matchtype || fs->mask.matchtype)
                fconf |= F_MPSHITTYPE;

        if (fs->val.macidx || fs->mask.macidx)
                fconf |= F_MACMATCH;

        if (fs->val.ethtype || fs->mask.ethtype)
                fconf |= F_ETHERTYPE;

        if (fs->val.proto || fs->mask.proto)
                fconf |= F_PROTOCOL;

        if (fs->val.tos || fs->mask.tos)
                fconf |= F_TOS;

        if (fs->val.vlan_vld || fs->mask.vlan_vld)
                fconf |= F_VLAN;

        if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
                if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
                        return (EINVAL);
                fconf |= F_VNIC_ID;
        }

        if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
                if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
                        return (EINVAL);
                fconf |= F_VNIC_ID;
        }

#ifdef notyet
        if (fs->val.encap_vld || fs->mask.encap_vld) {
                if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN)
                        return (EINVAL);
                fconf |= F_VNIC_ID;
        }
#endif

        if (fs->val.iport || fs->mask.iport)
                fconf |= F_PORT;

        if (fs->val.fcoe || fs->mask.fcoe)
                fconf |= F_FCOE;

        if ((tpp->filter_mode | fconf) != tpp->filter_mode)
                return (E2BIG);

        return (0);
}
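
/*
 * Example (editor's note): the final check requires the specification to use
 * only fields present in the global filter mode, i.e. fconf must be a subset
 * of tpp->filter_mode.  With a filter mode of F_PORT | F_PROTOCOL, a spec
 * that also matches on ethtype fails with E2BIG because
 * (filter_mode | F_ETHERTYPE) != filter_mode.
 */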

/*
 * Input: hardware filter configuration (filter mode/mask, ingress config).
 * Returns: driver's 32b filter mode matching the input.
 */
static uint32_t
fconf_to_mode(uint16_t hwmode, int vnic_mode)
{
        uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
            T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

        if (hwmode & F_FRAGMENTATION)
                mode |= T4_FILTER_IP_FRAGMENT;
        if (hwmode & F_MPSHITTYPE)
                mode |= T4_FILTER_MPS_HIT_TYPE;
        if (hwmode & F_MACMATCH)
                mode |= T4_FILTER_MAC_IDX;
        if (hwmode & F_ETHERTYPE)
                mode |= T4_FILTER_ETH_TYPE;
        if (hwmode & F_PROTOCOL)
                mode |= T4_FILTER_IP_PROTO;
        if (hwmode & F_TOS)
                mode |= T4_FILTER_IP_TOS;
        if (hwmode & F_VLAN)
                mode |= T4_FILTER_VLAN;
        if (hwmode & F_VNIC_ID)
                mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
        if (hwmode & F_PORT)
                mode |= T4_FILTER_PORT;
        if (hwmode & F_FCOE)
                mode |= T4_FILTER_FCoE;

        switch (vnic_mode) {
        case FW_VNIC_MODE_PF_VF:
                mode |= T4_FILTER_IC_VNIC;
                break;
        case FW_VNIC_MODE_ENCAP_EN:
                mode |= T4_FILTER_IC_ENCAP;
                break;
        case FW_VNIC_MODE_OUTER_VLAN:
        default:
                break;
        }

        return (mode);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        struct tp_params *tp = &sc->params.tp;
        uint16_t filter_mode;

        /* Filter mask must comply with the global filter mode. */
        MPASS((tp->filter_mode | tp->filter_mask) == tp->filter_mode);

        /* Non-zero incoming value in mode means "hashfilter mode". */
        filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
        *mode = fconf_to_mode(filter_mode, tp->vnic_mode);

        return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        struct tp_params *tp = &sc->params.tp;
        int rc, iconf;
        uint16_t fconf;

        iconf = mode_to_iconf(mode);
        fconf = mode_to_fconf(mode);
        if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
                return (0);     /* Nothing to do */

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setfm");
        if (rc)
                return (rc);

        if (hw_off_limits(sc)) {
                rc = ENXIO;
                goto done;
        }

        if (sc->tids.ftids_in_use > 0 ||        /* TCAM filters active */
            sc->tids.hpftids_in_use > 0 ||      /* hi-pri TCAM filters active */
            sc->tids.tids_in_use > 0) {         /* TOE or hashfilters active */
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        if (uld_active(sc, ULD_TOM)) {
                rc = EBUSY;
                goto done;
        }
#endif

        /* Note that filter mask will get clipped to the new filter mode. */
        rc = -t4_set_filter_cfg(sc, fconf, -1, iconf);
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
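
/*
 * Usage sketch (editor's note): these routines back the filter-mode ioctls
 * on the adapter's nexus device node.  From userland the flow is roughly as
 * follows, assuming the CHELSIO_T4_GET_FILTER_MODE ioctl from t4_ioctl.h and
 * a /dev/t5nex0 nexus node:
 *
 *	uint32_t mode = 0;	// 0 on entry: report the filter mode
 *	int fd = open("/dev/t5nex0", O_RDWR);
 *	ioctl(fd, CHELSIO_T4_GET_FILTER_MODE, &mode);
 *
 * Passing a non-zero mode in reports the filter mask instead, matching the
 * "hashfilter mode" convention in get_filter_mode() above.
 */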

int
set_filter_mask(struct adapter *sc, uint32_t mode)
{
        struct tp_params *tp = &sc->params.tp;
        int rc, iconf;
        uint16_t fmask;

        iconf = mode_to_iconf(mode);
        fmask = mode_to_fconf(mode);
        if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
                return (0);     /* Nothing to do */

        /*
         * We aren't going to change the global filter mode or VNIC mode here.
         * The given filter mask must conform to them.
         */
        if ((fmask | tp->filter_mode) != tp->filter_mode)
                return (EINVAL);
        if (iconf != -1 && iconf != tp->vnic_mode)
                return (EINVAL);

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sethfm");
        if (rc)
                return (rc);

        if (hw_off_limits(sc)) {
                rc = ENXIO;
                goto done;
        }

        if (sc->tids.tids_in_use > 0) {         /* TOE or hashfilters active */
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        if (uld_active(sc, ULD_TOM)) {
                rc = EBUSY;
                goto done;
        }
#endif
        rc = -t4_set_filter_cfg(sc, -1, fmask, -1);
done:
        end_synchronized_op(sc, 0);
        return (rc);
}

static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
        uint32_t tcb_addr;
        uint64_t hits;

        tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

        mtx_lock(&sc->reg_lock);
        if (hw_off_limits(sc))
                hits = 0;
        else if (is_t4(sc)) {
                uint64_t t;

                read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&t, 8);
                hits = be64toh(t);
        } else {
                uint32_t t;

                read_via_memwin(sc, 0, tcb_addr + 24, &t, 4);
                hits = be32toh(t);
        }
        mtx_unlock(&sc->reg_lock);

        return (hits);
}
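
/*
 * Layout note with a small example (editor's sketch): the hit count lives in
 * the filter's TCB.  On T4 it is a 64-bit big-endian counter at byte offset
 * 16 of the TCB; on T5 and later it is a 32-bit counter at byte offset 24,
 * which is why get_filter_hits() reads 8 or 4 bytes respectively:
 *
 *	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
 *	hit_addr = tcb_addr + (is_t4(sc) ? 16 : 24);
 */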

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        if (t->fs.hash)
                return (get_hashfilter(sc, t));
        else
                return (get_tcamfilter(sc, t));
}

static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
        struct filter_entry *f;
        struct fw_filter2_wr *fwr;
        u_int vnic_vld, vnic_vld_mask;
        struct wrq_cookie cookie;
        int i, rc, busy, locked;
        u_int tid;
        const int ntids = t->fs.type ? 4 : 1;

        MPASS(!t->fs.hash);
        /* Already validated against fconf, iconf */
        MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
        MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

        if (separate_hpfilter_region(sc) && t->fs.prio) {
                MPASS(t->idx < sc->tids.nhpftids);
                f = &sc->tids.hpftid_tab[t->idx];
                tid = sc->tids.hpftid_base + t->idx;
        } else {
                MPASS(t->idx < sc->tids.nftids);
                f = &sc->tids.ftid_tab[t->idx];
                tid = sc->tids.ftid_base + t->idx;
        }
        rc = busy = locked = 0;
        mtx_lock(&sc->tids.ftid_lock);
        for (i = 0; i < ntids; i++) {
                busy += f[i].pending + f[i].valid;
                locked += f[i].locked;
        }
        if (locked > 0)
                rc = EPERM;
        else if (busy > 0)
                rc = EBUSY;
        else {
                int len16;

                if (sc->params.filter2_wr_support)
                        len16 = howmany(sizeof(struct fw_filter2_wr), 16);
                else
                        len16 = howmany(sizeof(struct fw_filter_wr), 16);
                fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
                if (__predict_false(fwr == NULL))
                        rc = ENOMEM;
                else {
                        f->pending = 1;
                        if (separate_hpfilter_region(sc) && t->fs.prio)
                                sc->tids.hpftids_in_use++;
                        else
                                sc->tids.ftids_in_use++;
                }
        }
        mtx_unlock(&sc->tids.ftid_lock);
        if (rc != 0)
                return (rc);

        /*
         * Can't fail now.  A set-filter WR will definitely be sent.
         */

        f->tid = tid;
        f->fs = t->fs;
        f->l2te = l2te;
        f->smt = smt;

        if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
                vnic_vld = 1;
        else
                vnic_vld = 0;
        if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
                vnic_vld_mask = 1;
        else
                vnic_vld_mask = 0;

        bzero(fwr, sizeof(*fwr));
        if (sc->params.filter2_wr_support)
                fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(f->tid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        /* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
        bzero(fwr->sma, sizeof (fwr->sma));
        if (sc->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                        ULP_MODE_TCPDDP : ULP_MODE_NONE) |
                    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
                    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
                fwr->newlport = htobe16(f->fs.nat_dport);
                fwr->newfport = htobe16(f->fs.nat_sport);
                fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
        }
        commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

        /* Wait for response. */
        mtx_lock(&sc->tids.ftid_lock);
        for (;;) {
                if (f->pending == 0) {
                        rc = f->valid ? 0 : EIO;
                        break;
                }
                if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
                        rc = EINPROGRESS;
                        break;
                }
        }
        mtx_unlock(&sc->tids.ftid_lock);
        return (rc);
}
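
/*
 * Editor's note: an IPv6 TCAM filter occupies four consecutive slots,
 * presumably because the 128-bit addresses span four 32-bit TCAM entries.
 * That is why ntids is 4 when fs.type != 0, why all four slots must be idle
 * before the WR is sent, and why set_filter() requires IPv6 indices to be
 * 4-aligned.  For example, an IPv6 filter at idx 8 shadows slots 8-11.
 */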

static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
        struct tp_params *tp = &sc->params.tp;
        uint16_t fmask;

        *ftuple = fmask = 0;

        /*
         * Initialize each of the fields we care about that are present in the
         * Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && fs->mask.vlan) {
                *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
                    tp->vlan_shift;
                fmask |= F_VLAN;
        }

        if (tp->port_shift >= 0 && fs->mask.iport) {
                *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
                fmask |= F_PORT;
        }

        if (tp->protocol_shift >= 0 && fs->mask.proto) {
                *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
                fmask |= F_PROTOCOL;
        }

        if (tp->tos_shift >= 0 && fs->mask.tos) {
                *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
                fmask |= F_TOS;
        }

        if (tp->vnic_shift >= 0 && fs->mask.vnic) {
                /* vnic_mode was already validated. */
                if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
                        MPASS(fs->mask.pfvf_vld);
                else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
                        MPASS(fs->mask.ovlan_vld);
#ifdef notyet
                else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
                        MPASS(fs->mask.encap_vld);
#endif
                *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
                fmask |= F_VNIC_ID;
        }

        if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
                *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
                fmask |= F_MACMATCH;
        }

        if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
                *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
                fmask |= F_ETHERTYPE;
        }

        if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
                *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
                fmask |= F_MPSHITTYPE;
        }

        if (tp->frag_shift >= 0 && fs->mask.frag) {
                *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
                fmask |= F_FRAGMENTATION;
        }

        if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
                *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
                fmask |= F_FCOE;
        }

        /* A hashfilter must conform to the hardware filter mask. */
        if (fmask != tp->filter_mask)
                return (EINVAL);

        return (0);
}
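
/*
 * Packing example (editor's sketch): with a filter mask of F_PORT |
 * F_PROTOCOL, tp->port_shift and tp->protocol_shift give each field's bit
 * position inside the compressed tuple, so a TCP filter on ingress port 1
 * would pack roughly as:
 *
 *	uint64_t ftuple = 0;
 *	ftuple |= (uint64_t)1 << tp->port_shift;		// iport 1
 *	ftuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;	// proto 6
 */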

static bool
is_4tuple_specified(struct t4_filter_specification *fs)
{
        int i;
        const int n = fs->type ? 16 : 4;

        if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
                return (false);

        for (i = 0; i < n; i++) {
                if (fs->mask.sip[i] != 0xff)
                        return (false);
                if (fs->mask.dip[i] != 0xff)
                        return (false);
        }

        return (true);
}
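
/*
 * Example (editor's note): a hashfilter performs an exact 4-tuple match, so
 * the spec must not wildcard any tuple bits.  For an IPv4 spec that means:
 *
 *	fs.mask.sport = fs.mask.dport = 0xffff;
 *	memset(fs.mask.sip, 0xff, 4);
 *	memset(fs.mask.dip, 0xff, 4);
 */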

int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        struct tid_info *ti = &sc->tids;
        struct l2t_entry *l2te = NULL;
        struct smt_entry *smt = NULL;
        uint64_t ftuple;
        int rc;

        /*
         * Basic filter checks first.
         */

        if (t->fs.hash) {
                if (!is_hashfilter(sc) || ti->ntids == 0)
                        return (ENOTSUP);
                /* Hardware, not user, selects a tid for hashfilters. */
                if (t->idx != (uint32_t)-1)
                        return (EINVAL);
                /* T5 can't count hashfilter hits. */
                if (is_t5(sc) && t->fs.hitcnts)
                        return (EINVAL);
                if (!is_4tuple_specified(&t->fs))
                        return (EINVAL);
                rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
                if (rc != 0)
                        return (rc);
        } else {
                if (separate_hpfilter_region(sc) && t->fs.prio) {
                        if (ti->nhpftids == 0)
                                return (ENOTSUP);
                        if (t->idx >= ti->nhpftids)
                                return (EINVAL);
                } else {
                        if (ti->nftids == 0)
                                return (ENOTSUP);
                        if (t->idx >= ti->nftids)
                                return (EINVAL);
                }
                /* IPv6 filter idx must be 4 aligned */
                if (t->fs.type == 1 &&
                    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
                        return (EINVAL);
        }

        /* T4 doesn't support VLAN tag removal or rewrite, swapmac, or NAT. */
        if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
            (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
            t->fs.swapmac || t->fs.nat_mode))
                return (ENOTSUP);

        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
                return (EINVAL);
        if (t->fs.val.iport >= sc->params.nports)
                return (EINVAL);

        /* Can't specify an iqid/rss_info if not steering. */
        if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash && t->fs.iq)
                return (EINVAL);

        /* Validate against the global filter mode and ingress config */
        rc = check_fspec_against_fconf_iconf(sc, &t->fs);
        if (rc != 0)
                return (rc);

        /*
         * Basic checks passed.  Make sure the queues and tid tables are set up.
         */

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);

        if (hw_off_limits(sc)) {
                rc = ENXIO;
                goto done;
        }

        if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
                goto done;

        if (t->fs.hash) {
                if (__predict_false(ti->hftid_hash_4t == NULL)) {
                        rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
                        if (rc != 0)
                                goto done;
                }
        } else if (separate_hpfilter_region(sc) && t->fs.prio &&
            __predict_false(ti->hpftid_tab == NULL)) {
                MPASS(ti->nhpftids != 0);
                KASSERT(ti->hpftids_in_use == 0,
                    ("%s: no memory allocated but hpftids_in_use is %u",
                    __func__, ti->hpftids_in_use));
                ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
                    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
                if (ti->hpftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                if (!mtx_initialized(&sc->tids.ftid_lock)) {
                        mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
                        cv_init(&ti->ftid_cv, "t4fcv");
                }
        } else if (__predict_false(ti->ftid_tab == NULL)) {
                MPASS(ti->nftids != 0);
                KASSERT(ti->ftids_in_use == 0,
                    ("%s: no memory allocated but ftids_in_use is %u",
                    __func__, ti->ftids_in_use));
                ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
                    M_CXGBE, M_NOWAIT | M_ZERO);
                if (ti->ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                if (!mtx_initialized(&sc->tids.ftid_lock)) {
                        mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
                        cv_init(&ti->ftid_cv, "t4fcv");
                }
        }
done:
        end_synchronized_op(sc, 0);
        if (rc != 0)
                return (rc);

        /*
         * Allocate L2T entry, SMT entry, etc.
         */

        if (t->fs.newdmac || t->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
                    t->fs.dmac);
                if (__predict_false(l2te == NULL)) {
                        rc = EAGAIN;
                        goto error;
                }
        }

        if (t->fs.newsmac) {
                /* This filter needs an SMT entry; allocate one. */
                smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
                if (__predict_false(smt == NULL)) {
                        rc = EAGAIN;
                        goto error;
                }
                rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
                if (rc)
                        goto error;
        }

        if (t->fs.hash)
                rc = set_hashfilter(sc, t, ftuple, l2te, smt);
        else
                rc = set_tcamfilter(sc, t, l2te, smt);

        if (rc != 0 && rc != EINPROGRESS) {
error:
                if (l2te)
                        t4_l2t_release(l2te);
                if (smt)
                        t4_smt_release(smt);
        }
        return (rc);
}
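
/*
 * Usage sketch (editor's note): a minimal TCAM drop filter for TCP traffic
 * to local port 80, assuming a filter mode that includes F_PROTOCOL and the
 * CHELSIO_T4_SET_FILTER ioctl that wraps this routine from userland:
 *
 *	struct t4_filter t = { .idx = 0 };
 *	t.fs.action = FILTER_DROP;
 *	t.fs.val.proto = IPPROTO_TCP;
 *	t.fs.mask.proto = 0xff;
 *	t.fs.val.dport = 80;
 *	t.fs.mask.dport = 0xffff;
 *	ioctl(fd, CHELSIO_T4_SET_FILTER, &t);
 */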

static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
        struct filter_entry *f;
        struct fw_filter_wr *fwr;
        struct wrq_cookie cookie;
        int rc, nfilters;
#ifdef INVARIANTS
        u_int tid_base;
#endif

        mtx_lock(&sc->tids.ftid_lock);
        if (separate_hpfilter_region(sc) && t->fs.prio) {
                nfilters = sc->tids.nhpftids;
                f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
                tid_base = sc->tids.hpftid_base;
#endif
        } else {
                nfilters = sc->tids.nftids;
                f = sc->tids.ftid_tab;
#ifdef INVARIANTS
                tid_base = sc->tids.ftid_base;
#endif
        }
        MPASS(f != NULL);       /* Caller checked this. */
        if (t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }
        f += t->idx;

        if (f->locked) {
                rc = EPERM;
                goto done;
        }
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        if (f->valid == 0) {
                rc = EINVAL;
                goto done;
        }
        MPASS(f->tid == tid_base + t->idx);
        fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
        if (fwr == NULL) {
                rc = ENOMEM;
                goto done;
        }

        bzero(fwr, sizeof (*fwr));
        t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
        f->pending = 1;
        commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
        t->fs = f->fs;  /* extra info for the caller */

        for (;;) {
                if (f->pending == 0) {
                        rc = f->valid ? EIO : 0;
                        break;
                }
                if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&sc->tids.ftid_lock);
        return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

        /* No filters possible if not initialized yet. */
        if (!(sc->flags & FULL_INIT_DONE))
                return (EINVAL);

        /*
         * The checks for tid tables ensure that the locks that del_* will
         * reach for are initialized.
         */
        if (t->fs.hash) {
                if (sc->tids.hftid_hash_4t != NULL)
                        return (del_hashfilter(sc, t));
        } else if (separate_hpfilter_region(sc) && t->fs.prio) {
                if (sc->tids.hpftid_tab != NULL)
                        return (del_tcamfilter(sc, t));
        } else {
                if (sc->tids.ftid_tab != NULL)
                        return (del_tcamfilter(sc, t));
        }

        return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

        if (f->l2te) {
                t4_l2t_release(f->l2te);
                f->l2te = NULL;
        }
        if (f->smt) {
                t4_smt_release(f->smt);
                f->smt = NULL;
        }
}

static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
        struct wrq_cookie cookie;
        struct cpl_set_tcb_field *req;

        req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
        if (req == NULL)
                return (ENOMEM);
        bzero(req, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
        if (no_reply == 0) {
                req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
                    V_NO_REPLY(0));
        } else
                req->reply_ctrl = htobe16(V_NO_REPLY(1));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
        req->mask = htobe64(mask);
        req->val = htobe64(val);
        commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);

        return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

        return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
            (uint64_t)val << bit_pos, no_reply));
}
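
/*
 * Example (editor's sketch): set_tcb_tflag() updates a single bit in the
 * 64-bit t_flags word of the TCB.  Setting bit S_TF_CCTRL_CWR expands to a
 * CPL_SET_TCB_FIELD request with:
 *
 *	word = W_TCB_T_FLAGS;
 *	mask = 1ULL << S_TF_CCTRL_CWR;
 *	val  = (uint64_t)1 << S_TF_CCTRL_CWR;
 */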

int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        u_int tid = GET_TID(rpl);
        u_int rc, idx;
        struct filter_entry *f;

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));

        if (is_hpftid(sc, tid)) {
                idx = tid - sc->tids.hpftid_base;
                f = &sc->tids.hpftid_tab[idx];
        } else if (is_ftid(sc, tid)) {
                idx = tid - sc->tids.ftid_base;
                f = &sc->tids.ftid_tab[idx];
        } else
                panic("%s: FW reply for invalid TID %d.", __func__, tid);

        MPASS(f->tid == tid);
        rc = G_COOKIE(rpl->cookie);

        mtx_lock(&sc->tids.ftid_lock);
        KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
            __func__, rc, tid));
        switch (rc) {
        case FW_FILTER_WR_FLT_ADDED:
                /* set-filter succeeded */
                f->valid = 1;
                if (f->fs.newsmac) {
                        MPASS(f->smt != NULL);
                        set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
                        set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
                            V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
                            V_TCB_SMAC_SEL(f->smt->idx), 1);
                        /* XXX: wait for reply to TCB update before !pending */
                }
                break;
        case FW_FILTER_WR_FLT_DELETED:
                /* del-filter succeeded */
                MPASS(f->valid == 1);
                f->valid = 0;
                /* Fall through */
        case FW_FILTER_WR_SMT_TBL_FULL:
                /* set-filter failed due to lack of SMT space. */
                MPASS(f->valid == 0);
                free_filter_resources(f);
                if (separate_hpfilter_region(sc) && f->fs.prio)
                        sc->tids.hpftids_in_use--;
                else
                        sc->tids.ftids_in_use--;
                break;
        case FW_FILTER_WR_SUCCESS:
        case FW_FILTER_WR_EINVAL:
        default:
                panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
                    idx);
        }
        f->pending = 0;
        cv_broadcast(&sc->tids.ftid_cv);
        mtx_unlock(&sc->tids.ftid_lock);

        return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional
 * TCB updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
        u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
        u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
        struct filter_entry *f = lookup_atid(sc, atid);

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

        mtx_lock(&sc->tids.hftid_lock);
        KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
        KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
            __func__, f, f->tid));
        if (status == CPL_ERR_NONE) {
                f->tid = GET_TID(cpl);
                MPASS(lookup_hftid(sc, f->tid) == NULL);
                insert_hftid(sc, f);
                /*
                 * Leave the filter pending until it is fully set up, which will
                 * be indicated by the reply to the last TCB update.  No need to
                 * unblock the ioctl thread either.
                 */
                if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
                        goto done;
                f->valid = 1;
                f->pending = 0;
        } else {
                /* provide errno instead of tid to ioctl */
                f->tid = act_open_rpl_status_to_errno(status);
                f->valid = 0;
                f->pending = 0;
                if (act_open_has_tid(status))
                        release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
                free_filter_resources(f);
                remove_hf(sc, f);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
done:
        mtx_unlock(&sc->tids.hftid_lock);

        free_atid(sc, atid);
        return (0);
}

int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        u_int tid = GET_TID(rpl);
        struct filter_entry *f;

        mtx_lock(&sc->tids.hftid_lock);
        f = lookup_hftid(sc, tid);
        KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
        KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
            f, tid));
        KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
            __func__, f, tid));
        f->pending = 0;
        if (rpl->status == 0) {
                f->valid = 1;
        } else {
                f->tid = EIO;
                f->valid = 0;
                free_filter_resources(f);
                remove_hftid(sc, f);
                remove_hf(sc, f);
                release_tid(sc, tid, &sc->sge.ctrlq[0]);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
        mtx_unlock(&sc->tids.hftid_lock);

        return (0);
}

int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
        unsigned int tid = GET_TID(cpl);
        struct filter_entry *f;

        mtx_lock(&sc->tids.hftid_lock);
        f = lookup_hftid(sc, tid);
        KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
        KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
            f, tid));
        KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
            tid));
        f->pending = 0;
        if (cpl->status == 0) {
                f->valid = 0;
                free_filter_resources(f);
                remove_hftid(sc, f);
                remove_hf(sc, f);
                release_tid(sc, tid, &sc->sge.ctrlq[0]);
                if (f->locked == 0)
                        free(f, M_CXGBE);
        }
        cv_broadcast(&sc->tids.hftid_cv);
        mtx_unlock(&sc->tids.hftid_lock);

        return (0);
}

static int
get_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
        int i, nfilters;
        struct filter_entry *f;
        u_int in_use;
#ifdef INVARIANTS
        u_int tid_base;
#endif

        MPASS(!t->fs.hash);

        if (separate_hpfilter_region(sc) && t->fs.prio) {
                nfilters = sc->tids.nhpftids;
                f = sc->tids.hpftid_tab;
                in_use = sc->tids.hpftids_in_use;
#ifdef INVARIANTS
                tid_base = sc->tids.hpftid_base;
#endif
        } else {
                nfilters = sc->tids.nftids;
                f = sc->tids.ftid_tab;
                in_use = sc->tids.ftids_in_use;
#ifdef INVARIANTS
                tid_base = sc->tids.ftid_base;
#endif
        }

        if (in_use == 0 || f == NULL || t->idx >= nfilters) {
                t->idx = 0xffffffff;
                return (0);
        }

        f += t->idx;
        mtx_lock(&sc->tids.ftid_lock);
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        MPASS(f->tid == tid_base + i);
                        t->idx = i;
                        t->l2tidx = f->l2te ? f->l2te->idx : 0;
                        t->smtidx = f->smt ? f->smt->idx : 0;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, f->tid);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }
        t->idx = 0xffffffff;
done:
        mtx_unlock(&sc->tids.ftid_lock);
        return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
        struct tid_info *ti = &sc->tids;
        int tid;
        struct filter_entry *f;
        const int inv_tid = ti->ntids + ti->tid_base;

        MPASS(t->fs.hash);

        if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
            t->idx >= inv_tid) {
                t->idx = 0xffffffff;
                return (0);
        }
        if (t->idx < ti->tid_base)
                t->idx = ti->tid_base;

        mtx_lock(&ti->hftid_lock);
        for (tid = t->idx; tid < inv_tid; tid++) {
                f = lookup_hftid(sc, tid);
                if (f != NULL && f->valid) {
                        t->idx = tid;
                        t->l2tidx = f->l2te ? f->l2te->idx : 0;
                        t->smtidx = f->smt ? f->smt->idx : 0;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, tid);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }
        t->idx = 0xffffffff;
done:
        mtx_unlock(&ti->hftid_lock);
        return (0);
}
1506
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
        struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

        /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
        MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
        MPASS(atid >= 0);

        if (chip_id(sc) == CHELSIO_T5) {
                INIT_TP_WR(cpl5, 0);
        } else {
                INIT_TP_WR(cpl6, 0);
                cpl6->rsvd2 = 0;
                cpl6->opt3 = 0;
        }

        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
            V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
            V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
        cpl->local_port = htobe16(f->fs.val.dport);
        cpl->peer_port = htobe16(f->fs.val.sport);
        cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
        cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
        cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
        cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
        cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
            f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
            V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
            V_NO_CONG(f->fs.rpttid) |
            V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
            F_TCAM_BYPASS | F_NON_OFFLOAD);

        cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
        cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
            V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
            V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
            F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
            V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
            V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

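/*
 * IPv4 counterpart of mk_act_open_req6; identical except that the addresses
 * are 32 bits wide and are assembled a byte at a time from fs.val.
 */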
static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
        struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

        /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
        MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
        MPASS(atid >= 0);

        if (chip_id(sc) == CHELSIO_T5) {
                INIT_TP_WR(cpl5, 0);
        } else {
                INIT_TP_WR(cpl6, 0);
                cpl6->rsvd2 = 0;
                cpl6->opt3 = 0;
        }

        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
            V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
            V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
        cpl->local_port = htobe16(f->fs.val.dport);
        cpl->peer_port = htobe16(f->fs.val.sport);
        cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
            f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
        cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
            f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
        cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
            f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
            V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
            V_NO_CONG(f->fs.rpttid) |
            V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
            F_TCAM_BYPASS | F_NON_OFFLOAD);

        cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
        cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
            V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
            V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
            F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
            V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
            V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

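/*
 * Size, in 16-byte units, of the active open CPL for the given chip
 * generation and address family.  The table is indexed by chip_id (capped
 * at the T6 entry) and by isipv6.
 */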
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
        int idx;
        static const int sz_table[3][2] = {
                {
                        howmany(sizeof(struct cpl_act_open_req), 16),
                        howmany(sizeof(struct cpl_act_open_req6), 16)
                },
                {
                        howmany(sizeof(struct cpl_t5_act_open_req), 16),
                        howmany(sizeof(struct cpl_t5_act_open_req6), 16)
                },
                {
                        howmany(sizeof(struct cpl_t6_act_open_req), 16),
                        howmany(sizeof(struct cpl_t6_act_open_req6), 16)
                },
        };

        MPASS(chip_id(sc) >= CHELSIO_T4);
        idx = min(chip_id(sc) - CHELSIO_T4, 2);

        return (sz_table[idx][!!isipv6]);
}

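/*
 * Program a hash filter.  Allocates a filter_entry and an atid, sends a
 * TCAM-bypass active open on the control queue, and sleeps until the reply
 * (or a signal) resolves the request.  On success t->idx carries the tid
 * that the hardware assigned to the filter.
 */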
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
        void *wr;
        struct wrq_cookie cookie;
        struct filter_entry *f;
        int rc, atid = -1;
        uint32_t hash;

        MPASS(t->fs.hash);
        /* Already validated against fconf, iconf */
        MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
        MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

        hash = hf_hashfn_4t(&t->fs);

        mtx_lock(&sc->tids.hftid_lock);
        if (lookup_hf(sc, &t->fs, hash) != NULL) {
                rc = EEXIST;
                goto done;
        }

        f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
        if (__predict_false(f == NULL)) {
                rc = ENOMEM;
                goto done;
        }
        f->fs = t->fs;
        f->l2te = l2te;
        f->smt = smt;

        atid = alloc_atid(sc, f);
        if (__predict_false(atid == -1)) {
                free(f, M_CXGBE);
                rc = EAGAIN;
                goto done;
        }
        MPASS(atid >= 0);

        wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
            &cookie);
        if (wr == NULL) {
                free_atid(sc, atid);
                free(f, M_CXGBE);
                rc = ENOMEM;
                goto done;
        }
        if (f->fs.type)
                mk_act_open_req6(sc, f, atid, ftuple, wr);
        else
                mk_act_open_req(sc, f, atid, ftuple, wr);

        f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
        f->pending = 1;
        f->tid = -1;
        insert_hf(sc, f, hash);
        commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);

        for (;;) {
                MPASS(f->locked);
                if (f->pending == 0) {
                        if (f->valid) {
                                rc = 0;
                                f->locked = 0;
                                t->idx = f->tid;
                        } else {
                                rc = f->tid;
                                free(f, M_CXGBE);
                        }
                        break;
                }
                if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
                        f->locked = 0;
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&sc->tids.hftid_lock);
        return (rc);
}

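/*
 * The three mk_*_ulp helpers below wrap CPLs in ULP_TX_PKT immediate-data
 * sub-commands so that several of them can travel in a single work request.
 * Each sub-command must occupy a multiple of 16 bytes on the wire, so a
 * ULP_TX_SC_NOOP is appended whenever the payload ends short of that
 * boundary.
 */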
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_set_tcb_field_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
        req->mask = htobe64(mask);
        req->val = htobe64(val);

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__SET_TCB_FIELD_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_abort_req_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_abort_req_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        req->rsvd0 = htonl(0);
        req->rsvd1 = 0;
        req->cmd = CPL_ABORT_NO_RST;

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__ABORT_REQ_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
        struct ulptx_idata *ulpsc;
        struct cpl_abort_rpl_core *rpl;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*rpl));

        rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
        OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        rpl->rsvd0 = htonl(0);
        rpl->rsvd1 = 0;
        rpl->cmd = CPL_ABORT_NO_RST;

        ulpsc = (struct ulptx_idata *)(rpl + 1);
        if (LEN__ABORT_RPL_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
                return (ulpsc + 1);
        }
        return (ulpsc);
}

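/*
 * A hash filter is deleted with a single work request that strings the
 * three ULP sub-commands above together: a SET_TCB_FIELD that retargets the
 * TCB's RSS info at the given queue (the firmware event queue in
 * del_hashfilter below, presumably so the completion is steered there), an
 * ABORT_REQ that tears down the TCB, and an ABORT_RPL that completes the
 * abort without putting an RST on the wire.
 */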
static inline int
del_hashfilter_wrlen(void)
{

        return (sizeof(struct work_request_hdr) +
            roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
            roundup2(LEN__ABORT_REQ_ULP, 16) +
            roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
        struct ulp_txpkt *ulpmc;

        INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
        ulpmc = (struct ulp_txpkt *)(wrh + 1);
        ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
            V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
        ulpmc = mk_abort_req_ulp(ulpmc, tid);
        ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

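/*
 * Delete the hash filter at t->idx.  The filter must be valid and must not
 * be locked or have an operation pending.  The filter specification is
 * copied out to the caller before the wait, and EINPROGRESS is returned if
 * the wait for the completion is interrupted by a signal.
 */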
static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
        struct tid_info *ti = &sc->tids;
        void *wr;
        struct filter_entry *f;
        struct wrq_cookie cookie;
        int rc;
        const int wrlen = del_hashfilter_wrlen();
        const int inv_tid = ti->ntids + ti->tid_base;

        MPASS(sc->tids.hftid_hash_4t != NULL);
        MPASS(sc->tids.ntids > 0);

        if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
                return (EINVAL);

        mtx_lock(&ti->hftid_lock);
        f = lookup_hftid(sc, t->idx);
        if (f == NULL || f->valid == 0) {
                rc = EINVAL;
                goto done;
        }
        MPASS(f->tid == t->idx);
        if (f->locked) {
                rc = EPERM;
                goto done;
        }
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
        if (wr == NULL) {
                rc = ENOMEM;
                goto done;
        }

        mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
        f->locked = 1;
        f->pending = 1;
        commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
        t->fs = f->fs;  /* extra info for the caller */

        for (;;) {
                MPASS(f->locked);
                if (f->pending == 0) {
                        if (f->valid) {
                                f->locked = 0;
                                rc = EIO;
                        } else {
                                rc = 0;
                                free(f, M_CXGBE);
                        }
                        break;
                }
                if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
                        f->locked = 0;
                        rc = EINPROGRESS;
                        break;
                }
        }
done:
        mtx_unlock(&ti->hftid_lock);
        return (rc);
}

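/*
 * Write the NAT rewrite parameters into the filter's TCB.  The new
 * addresses go into raw TCB words (SND_UNA_RAW and RX_FRAG2_PTR_RAW for
 * IPv6, the RX_FRAG3 raw words for IPv4) that a filter TCB evidently
 * repurposes, assembled 32 bits at a time from the fs.nat_dip/fs.nat_sip
 * byte arrays; the two rewritten ports share the PDU_HDR_LEN word.
 */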
#define WORD_MASK       0xffffffff
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

        if (dip) {
                if (f->fs.type) {
                        set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
                            f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
                            f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

                        set_tcb_field(sc, f->tid,
                            W_TCB_SND_UNA_RAW + 1, WORD_MASK,
                            f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
                            f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

                        set_tcb_field(sc, f->tid,
                            W_TCB_SND_UNA_RAW + 2, WORD_MASK,
                            f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
                            f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

                        set_tcb_field(sc, f->tid,
                            W_TCB_SND_UNA_RAW + 3, WORD_MASK,
                            f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
                            f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
                } else {
                        set_tcb_field(sc, f->tid,
                            W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
                            f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
                            f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
                }
        }

        if (sip) {
                if (f->fs.type) {
                        set_tcb_field(sc, f->tid,
                            W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
                            f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
                            f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

                        set_tcb_field(sc, f->tid,
                            W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
                            f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
                            f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

                        set_tcb_field(sc, f->tid,
                            W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
                            f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
                            f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

                        set_tcb_field(sc, f->tid,
                            W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
                            f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
                            f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);

                } else {
                        set_tcb_field(sc, f->tid,
                            W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
                            f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
                            f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
                }
        }

        set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
            (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}

/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
        int updated = 0;

        MPASS(f->tid < sc->tids.ntids);
        MPASS(f->fs.hash);
        MPASS(f->pending);
        MPASS(f->valid == 0);

        if (f->fs.newdmac) {
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
                updated++;
        }

        if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
                updated++;
        }

        if (f->fs.newsmac) {
                MPASS(f->smt != NULL);
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
                set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
                    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
                    1);
                updated++;
        }

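        /*
         * Each NAT mode enables rewriting of some subset of the destination
         * IP, source IP, destination port, and source port; set_nat_params()
         * writes only the fields selected here.
         */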
        switch (f->fs.nat_mode) {
        case NAT_MODE_NONE:
                break;
        case NAT_MODE_DIP:
                set_nat_params(sc, f, true, false, false, false);
                updated++;
                break;
        case NAT_MODE_DIP_DP:
                set_nat_params(sc, f, true, false, true, false);
                updated++;
                break;
        case NAT_MODE_DIP_DP_SIP:
                set_nat_params(sc, f, true, true, true, false);
                updated++;
                break;
        case NAT_MODE_DIP_DP_SP:
                set_nat_params(sc, f, true, false, true, true);
                updated++;
                break;
        case NAT_MODE_SIP_SP:
                set_nat_params(sc, f, false, true, false, true);
                updated++;
                break;
        case NAT_MODE_DIP_SIP_SP:
                set_nat_params(sc, f, true, true, false, true);
                updated++;
                break;
        case NAT_MODE_ALL:
                set_nat_params(sc, f, true, true, true, true);
                updated++;
                break;
        default:
                MPASS(0);       /* should have been validated earlier */
                break;
        }

        if (f->fs.nat_seq_chk) {
                set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
                    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
                    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
                updated++;
        }

        if (is_t5(sc) && f->fs.action == FILTER_DROP) {
                /*
                 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
                 */
                set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
                    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
                updated++;
        }

        /*
         * Enable switching after all secondary resources (L2T entry, SMT
         * entry, etc.) are set up so that any switched packet will use
         * correct values.
         */
        if (f->fs.action == FILTER_SWITCH) {
                set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
                updated++;
        }

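        /*
         * Unlike every update above, this final update leaves the no-reply
         * flag clear so that the firmware answers it; the reply is what
         * eventually marks the filter fully set up.
         */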
        if (f->fs.hitcnts || updated > 0) {
                set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
                    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
                    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
                return (EINPROGRESS);
        }

        return (0);
}