]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/cxgbe/t4_filter.c
cxgbe(4): Move all TCAM filter code into a separate file.
[FreeBSD/FreeBSD.git] / sys / dev / cxgbe / t4_filter.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Chelsio Communications, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #include <sys/bus.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/rwlock.h>
43 #include <sys/socket.h>
44 #include <sys/sbuf.h>
45 #include <netinet/in.h>
46
47 #include "common/common.h"
48 #include "common/t4_msg.h"
49 #include "common/t4_regs.h"
50 #include "t4_l2t.h"
51
/*
 * Software state for one entry in the hardware TCAM filter region.
 * Lifecycle: set_filter_wr() marks it pending, t4_filter_rpl() marks it
 * valid (or clears it on failure), del_filter_wr() marks it pending again
 * until the firmware confirms the delete.
 */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;      /* spec as supplied by the caller */
};
61
62 static uint32_t
63 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
64 {
65         uint32_t mode;
66
67         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
68             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
69
70         if (fconf & F_FRAGMENTATION)
71                 mode |= T4_FILTER_IP_FRAGMENT;
72
73         if (fconf & F_MPSHITTYPE)
74                 mode |= T4_FILTER_MPS_HIT_TYPE;
75
76         if (fconf & F_MACMATCH)
77                 mode |= T4_FILTER_MAC_IDX;
78
79         if (fconf & F_ETHERTYPE)
80                 mode |= T4_FILTER_ETH_TYPE;
81
82         if (fconf & F_PROTOCOL)
83                 mode |= T4_FILTER_IP_PROTO;
84
85         if (fconf & F_TOS)
86                 mode |= T4_FILTER_IP_TOS;
87
88         if (fconf & F_VLAN)
89                 mode |= T4_FILTER_VLAN;
90
91         if (fconf & F_VNIC_ID) {
92                 mode |= T4_FILTER_VNIC;
93                 if (iconf & F_VNIC)
94                         mode |= T4_FILTER_IC_VNIC;
95         }
96
97         if (fconf & F_PORT)
98                 mode |= T4_FILTER_PORT;
99
100         if (fconf & F_FCOE)
101                 mode |= T4_FILTER_FCoE;
102
103         return (mode);
104 }
105
106 static uint32_t
107 mode_to_fconf(uint32_t mode)
108 {
109         uint32_t fconf = 0;
110
111         if (mode & T4_FILTER_IP_FRAGMENT)
112                 fconf |= F_FRAGMENTATION;
113
114         if (mode & T4_FILTER_MPS_HIT_TYPE)
115                 fconf |= F_MPSHITTYPE;
116
117         if (mode & T4_FILTER_MAC_IDX)
118                 fconf |= F_MACMATCH;
119
120         if (mode & T4_FILTER_ETH_TYPE)
121                 fconf |= F_ETHERTYPE;
122
123         if (mode & T4_FILTER_IP_PROTO)
124                 fconf |= F_PROTOCOL;
125
126         if (mode & T4_FILTER_IP_TOS)
127                 fconf |= F_TOS;
128
129         if (mode & T4_FILTER_VLAN)
130                 fconf |= F_VLAN;
131
132         if (mode & T4_FILTER_VNIC)
133                 fconf |= F_VNIC_ID;
134
135         if (mode & T4_FILTER_PORT)
136                 fconf |= F_PORT;
137
138         if (mode & T4_FILTER_FCoE)
139                 fconf |= F_FCOE;
140
141         return (fconf);
142 }
143
144 static uint32_t
145 mode_to_iconf(uint32_t mode)
146 {
147
148         if (mode & T4_FILTER_IC_VNIC)
149                 return (F_VNIC);
150         return (0);
151 }
152
153 static int check_fspec_against_fconf_iconf(struct adapter *sc,
154     struct t4_filter_specification *fs)
155 {
156         struct tp_params *tpp = &sc->params.tp;
157         uint32_t fconf = 0;
158
159         if (fs->val.frag || fs->mask.frag)
160                 fconf |= F_FRAGMENTATION;
161
162         if (fs->val.matchtype || fs->mask.matchtype)
163                 fconf |= F_MPSHITTYPE;
164
165         if (fs->val.macidx || fs->mask.macidx)
166                 fconf |= F_MACMATCH;
167
168         if (fs->val.ethtype || fs->mask.ethtype)
169                 fconf |= F_ETHERTYPE;
170
171         if (fs->val.proto || fs->mask.proto)
172                 fconf |= F_PROTOCOL;
173
174         if (fs->val.tos || fs->mask.tos)
175                 fconf |= F_TOS;
176
177         if (fs->val.vlan_vld || fs->mask.vlan_vld)
178                 fconf |= F_VLAN;
179
180         if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
181                 fconf |= F_VNIC_ID;
182                 if (tpp->ingress_config & F_VNIC)
183                         return (EINVAL);
184         }
185
186         if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
187                 fconf |= F_VNIC_ID;
188                 if ((tpp->ingress_config & F_VNIC) == 0)
189                         return (EINVAL);
190         }
191
192         if (fs->val.iport || fs->mask.iport)
193                 fconf |= F_PORT;
194
195         if (fs->val.fcoe || fs->mask.fcoe)
196                 fconf |= F_FCOE;
197
198         if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
199                 return (E2BIG);
200
201         return (0);
202 }
203
204 int
205 get_filter_mode(struct adapter *sc, uint32_t *mode)
206 {
207         struct tp_params *tpp = &sc->params.tp;
208
209         /*
210          * We trust the cached values of the relevant TP registers.  This means
211          * things work reliably only if writes to those registers are always via
212          * t4_set_filter_mode_.
213          */
214         *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);
215
216         return (0);
217 }
218
219 int
220 set_filter_mode(struct adapter *sc, uint32_t mode)
221 {
222         struct tp_params *tpp = &sc->params.tp;
223         uint32_t fconf, iconf;
224         int rc;
225
226         iconf = mode_to_iconf(mode);
227         if ((iconf ^ tpp->ingress_config) & F_VNIC) {
228                 /*
229                  * For now we just complain if A_TP_INGRESS_CONFIG is not
230                  * already set to the correct value for the requested filter
231                  * mode.  It's not clear if it's safe to write to this register
232                  * on the fly.  (And we trust the cached value of the register).
233                  */
234                 return (EBUSY);
235         }
236
237         fconf = mode_to_fconf(mode);
238
239         rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
240             "t4setfm");
241         if (rc)
242                 return (rc);
243
244         if (sc->tids.ftids_in_use > 0) {
245                 rc = EBUSY;
246                 goto done;
247         }
248
249 #ifdef TCP_OFFLOAD
250         if (uld_active(sc, ULD_TOM)) {
251                 rc = EBUSY;
252                 goto done;
253         }
254 #endif
255
256         rc = -t4_set_filter_mode(sc, fconf, true);
257 done:
258         end_synchronized_op(sc, LOCK_HELD);
259         return (rc);
260 }
261
/*
 * Read the packet hit count for a filter straight out of the filter's
 * TCB in adapter memory (via memory window 0).
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t tcb_addr;

	/* Address of this filter's TCB: base + (absolute ftid) * TCB_SIZE. */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
	    (fid + sc->tids.ftid_base) * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		/* T4: 64-bit big-endian count at TCB offset 16. */
		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		/* T5+: 32-bit big-endian count at TCB offset 24. */
		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}
282
283 int
284 get_filter(struct adapter *sc, struct t4_filter *t)
285 {
286         int i, rc, nfilters = sc->tids.nftids;
287         struct filter_entry *f;
288
289         rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
290             "t4getf");
291         if (rc)
292                 return (rc);
293
294         if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
295             t->idx >= nfilters) {
296                 t->idx = 0xffffffff;
297                 goto done;
298         }
299
300         f = &sc->tids.ftid_tab[t->idx];
301         for (i = t->idx; i < nfilters; i++, f++) {
302                 if (f->valid) {
303                         t->idx = i;
304                         t->l2tidx = f->l2t ? f->l2t->idx : 0;
305                         t->smtidx = f->smtidx;
306                         if (f->fs.hitcnts)
307                                 t->hits = get_filter_hits(sc, t->idx);
308                         else
309                                 t->hits = UINT64_MAX;
310                         t->fs = f->fs;
311
312                         goto done;
313                 }
314         }
315
316         t->idx = 0xffffffff;
317 done:
318         end_synchronized_op(sc, LOCK_HELD);
319         return (0);
320 }
321
/*
 * Build and send the FW_FILTER_WR work request that programs filter
 * 'fidx' into the hardware.  Allocates an L2T switching entry first if
 * the filter rewrites the dmac or vlan.  The reply arrives later on the
 * firmware event queue and is handled by t4_filter_rpl().
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid, vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Already validated against fconf, iconf */
	MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
	MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
	/* The VNIC slot holds either an outer VLAN or a PF/VF id, never both. */
	if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	/* Absolute tid of the filter. */
	ftid = sc->tids.ftid_base + fidx;

	/* Reserve space for the work request on the management queue. */
	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof(*fwr));

	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	/* Single-byte field; no byte swap needed. */
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	/* Ask for the reply on the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Cleared by t4_filter_rpl() when the firmware's reply arrives. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}
434
/*
 * Validate and program a new filter at t->idx, then sleep (interruptibly)
 * until the firmware confirms or rejects it.  Returns 0 on success, EIO
 * if the firmware rejected the filter, EINPROGRESS if the wait was
 * interrupted (the operation continues in the background), or a
 * validation errno.
 */
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		goto done;

	/* A switch filter must egress through a real port. */
	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Filters need a fully initialized adapter (queues, interrupts). */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		goto done;

	/* Lazily allocate the filter table and its lock on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter occupies 4 consecutive slots; all of them must be
	 * free and unlocked.  An IPv4 filter only needs its own slot.
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for the firmware's reply; t4_filter_rpl() clears
		 * f->pending and wakes us up via the ftid_tab channel.
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				/* Interrupted; setup continues asynchronously. */
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
546
547 static int
548 del_filter_wr(struct adapter *sc, int fidx)
549 {
550         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
551         struct fw_filter_wr *fwr;
552         unsigned int ftid;
553         struct wrq_cookie cookie;
554
555         ftid = sc->tids.ftid_base + fidx;
556
557         fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
558         if (fwr == NULL)
559                 return (ENOMEM);
560         bzero(fwr, sizeof (*fwr));
561
562         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
563
564         f->pending = 1;
565         commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
566         return (0);
567 }
568
/*
 * Delete the filter at t->idx and sleep (interruptibly) until the
 * firmware confirms.  Returns 0 on success, EIO if the filter is still
 * valid after the reply, EINPROGRESS if the wait was interrupted, or a
 * validation errno.  On success t->fs is filled in with the deleted
 * filter's spec for the caller's benefit.
 */
int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Work requests need the adapter fully initialized. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* A filter with an outstanding request can't be deleted yet. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for the firmware's reply; t4_filter_rpl() clears
		 * f->pending/f->valid and wakes us up.  Note: rc == 0 with
		 * f->valid still set means the delete failed.
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				/* Interrupted; delete continues asynchronously. */
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
636
637 static void
638 clear_filter(struct filter_entry *f)
639 {
640         if (f->l2t)
641                 t4_l2t_release(f->l2t);
642
643         bzero(f, sizeof (*f));
644 }
645
/*
 * Handler for the firmware's reply to a filter work request (set or
 * delete).  Updates the filter entry's state accordingly and wakes any
 * thread sleeping in set_filter()/del_filter().  Always returns 0.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));
	MPASS(iq == &sc->sge.fwq);
	MPASS(is_ftid(sc, idx));

	/* Convert the absolute tid into an index into ftid_tab. */
	idx -= sc->tids.ftid_base;
	f = &sc->tids.ftid_tab[idx];
	/* Firmware status code is carried in the reply's cookie field. */
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	if (rc == FW_FILTER_WR_FLT_ADDED) {
		KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
		    __func__, idx));
		/* SMT index assigned by the firmware, from the old TCB value. */
		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
		f->pending = 0;  /* asynchronous setup completed */
		f->valid = 1;
	} else {
		if (rc != FW_FILTER_WR_FLT_DELETED) {
			/* Add or delete failed, display an error */
			log(LOG_ERR,
			    "filter %u setup failed with error %u\n",
			    idx, rc);
		}

		/* Deleted or failed: either way the slot is free again. */
		clear_filter(f);
		sc->tids.ftids_in_use--;
	}
	/* Wake set_filter()/del_filter() sleepers. */
	wakeup(&sc->tids.ftid_tab);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}