/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

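/*
 * qls_tx_comp
 *	Transmit completion handler: releases the DMA map and mbuf of the
 *	transmit buffer named by the completion's tid and advances the
 *	ring's txr_done index.  An out-of-range tid flags the adapter for
 *	recovery.
 */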
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
        qla_tx_buf_t *txb;
        uint32_t tx_idx = tx_comp->tid_lo;

        if (tx_idx >= NUM_TX_DESCRIPTORS) {
                ha->qla_initiate_recovery = 1;
                return;
        }

        txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

        if (txb->m_head) {
                if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
                bus_dmamap_sync(ha->tx_tag, txb->map,
                        BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(ha->tx_tag, txb->map);
                m_freem(txb->m_head);

                txb->m_head = NULL;
        }

        ha->tx_ring[txr_idx].txr_done++;

        if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
                ha->tx_ring[txr_idx].txr_done = 0;
}

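/*
 * qls_replenish_rx
 *	Refills the small buffer queue (SBQ) of receive ring r_idx with fresh
 *	mbufs for the slots released by the completion path, and publishes the
 *	new SBQ producer index to the hardware in blocks of 16 entries.
 */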
static void
qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
{
        qla_rx_buf_t                    *rxb;
        qla_rx_ring_t                   *rxr;
        int                             count;
        volatile q81_bq_addr_e_t        *sbq_e;

        rxr = &ha->rx_ring[r_idx];

        count = rxr->rx_free;
        sbq_e = rxr->sbq_vaddr;

        while (count--) {
                rxb = &rxr->rx_buf[rxr->sbq_next];

                if (rxb->m_head == NULL) {
                        if (qls_get_mbuf(ha, rxb, NULL) != 0) {
                                device_printf(ha->pci_dev,
                                        "%s: qls_get_mbuf [0,%d,%d] failed\n",
                                        __func__, rxr->sbq_next, r_idx);
                                rxb->m_head = NULL;
                                break;
                        }
                }

                if (rxb->m_head != NULL) {
                        sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
                        sbq_e[rxr->sbq_next].addr_hi =
                                (uint32_t)(rxb->paddr >> 32);

                        rxr->sbq_next++;
                        if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
                                rxr->sbq_next = 0;

                        rxr->sbq_free++;
                        rxr->rx_free--;
                }

                if (rxr->sbq_free == 16) {
                        rxr->sbq_in += 16;
                        rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
                        rxr->sbq_free = 0;

                        Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
                }
        }
}

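/*
 * qls_rx_comp
 *	Receive completion handler: validates the completion entry against the
 *	buffer it references, builds the mbuf packet header (VLAN tag, RSS
 *	hash, checksum-offload flags) and passes the frame to LRO or directly
 *	to if_input().  Returns 0 on success and -1 on an inconsistent entry;
 *	a buffer address mismatch additionally flags the adapter for recovery.
 */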
static int
qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
{
        qla_rx_buf_t    *rxb;
        qla_rx_ring_t   *rxr;
        device_t        dev = ha->pci_dev;
        struct mbuf     *mp = NULL;
        struct ifnet    *ifp = ha->ifp;
        struct lro_ctrl *lro;
        struct ether_vlan_header *eh;

        rxr = &ha->rx_ring[rxr_idx];

        lro = &rxr->lro;

        rxb = &rxr->rx_buf[rxr->rx_next];

        if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
                device_printf(dev, "%s: DS bit not set \n", __func__);
                return -1;
        }
        if (rxb->paddr != cq_e->b_paddr) {
                device_printf(dev,
                        "%s: (rxb->paddr != cq_e->b_paddr)[%p, %p] \n",
                        __func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);

                Q81_SET_CQ_INVALID(cq_idx);

                ha->qla_initiate_recovery = 1;

                return(-1);
        }

        rxr->rx_int++;

        if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
                mp = rxb->m_head;
                rxb->m_head = NULL;

                if (mp == NULL) {
                        device_printf(dev, "%s: mp == NULL\n", __func__);
                } else {
                        mp->m_flags |= M_PKTHDR;
                        mp->m_pkthdr.len = cq_e->length;
                        mp->m_pkthdr.rcvif = ifp;
                        mp->m_len = cq_e->length;

                        eh = mtod(mp, struct ether_vlan_header *);

                        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                                uint32_t *data = (uint32_t *)eh;

                                mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
                                mp->m_flags |= M_VLANTAG;

                                /*
                                 * Strip the 802.1Q tag: shift the destination
                                 * and source MAC addresses forward by four
                                 * bytes and trim the encapsulation from the
                                 * front of the mbuf.
                                 */
                                *(data + 3) = *(data + 2);
                                *(data + 2) = *(data + 1);
                                *(data + 1) = *data;

                                m_adj(mp, ETHER_VLAN_ENCAP_LEN);
                        }

                        if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
                                rxr->rss_int++;
                                mp->m_pkthdr.flowid = cq_e->rss;
                                M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH);
                        }
                        if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
                                Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
                                mp->m_pkthdr.csum_flags = 0;
                        } else {
                                mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
                                        CSUM_IP_VALID | CSUM_DATA_VALID |
                                        CSUM_PSEUDO_HDR;
                                mp->m_pkthdr.csum_data = 0xFFFF;
                        }
                        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

                        if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
                                /* LRO packet has been successfully queued */
                        } else {
                                (*ifp->if_input)(ifp, mp);
                        }
                }
        } else {
                device_printf(dev, "%s: err [0x%08x]\n", __func__, cq_e->flags1);
        }

        rxr->rx_free++;
        rxr->rx_next++;

        if (rxr->rx_next == NUM_RX_DESCRIPTORS)
                rxr->rx_next = 0;

        if ((rxr->rx_free + rxr->sbq_free) >= 16)
                qls_replenish_rx(ha, rxr_idx);

        return 0;
}

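/*
 * qls_cq_isr
 *	Drains completion queue cq_idx: walks entries from cq_next up to the
 *	index published by the hardware at cqi_vaddr, dispatching TX and RX
 *	completions, then flushes LRO and writes back the new consumer index
 *	unless an RX error forced an early exit.
 */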
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
        q81_cq_e_t *cq_e, *cq_b;
        uint32_t i, cq_comp_idx;
        int ret = 0, tx_comp_done = 0;
        struct lro_ctrl *lro;

        cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;
        lro = &ha->rx_ring[cq_idx].lro;

        cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

        i = ha->rx_ring[cq_idx].cq_next;

        while (i != cq_comp_idx) {
                cq_e = &cq_b[i];

                switch (cq_e->opcode) {
                case Q81_IOCB_TX_MAC:
                case Q81_IOCB_TX_TSO:
                        qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
                        tx_comp_done++;
                        break;

                case Q81_IOCB_RX:
                        ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
                        break;

                case Q81_IOCB_MPI:
                case Q81_IOCB_SYS:
                default:
                        device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal \n",
                                __func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
                                cq_e->opcode);
                        qls_dump_buf32(ha, __func__, cq_e,
                                (sizeof (q81_cq_e_t) >> 2));
                        break;
                }

                i++;
                if (i == NUM_CQ_ENTRIES)
                        i = 0;

                if (ret) {
                        break;
                }

                if (i == cq_comp_idx) {
                        cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
                }

                if (tx_comp_done) {
                        taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                        tx_comp_done = 0;
                }
        }

        tcp_lro_flush_all(lro);

        ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

        if (!ret) {
                Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
        }
        if (tx_comp_done)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

        return;
}

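/*
 * qls_mbx_isr
 *	Mailbox/AEN interrupt handler: reads mailbox register 0 and, depending
 *	on whether the status word falls in the 0x4000 (mailbox command
 *	completion) or 0x8000 (asynchronous event) class, copies the remaining
 *	mailbox registers into ha->mbox[] or ha->aen[].  Link state AENs update
 *	ha->link_up / ha->link_hw_info.  The routine then acknowledges the
 *	interrupt by writing Q81_CTL_HCS_CMD_CLR_RTH_INTR to the host
 *	command/status register.
 */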
static void
qls_mbx_isr(qla_host_t *ha)
{
        uint32_t data;
        int i;
        device_t dev = ha->pci_dev;

        if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
                if ((data & 0xF000) == 0x4000) {
                        ha->mbox[0] = data;
                        for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
                                if (qls_mbx_rd_reg(ha, i, &data))
                                        break;
                                ha->mbox[i] = data;
                        }
                        ha->mbx_done = 1;
                } else if ((data & 0xF000) == 0x8000) {
                        /* we have an AEN */
                        ha->aen[0] = data;
                        for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
                                if (qls_mbx_rd_reg(ha, i, &data))
                                        break;
                                ha->aen[i] = data;
                        }
                        device_printf(dev, "%s: AEN "
                                "[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
                                " 0x%08x 0x%08x 0x%08x 0x%08x]\n",
                                __func__,
                                ha->aen[0], ha->aen[1], ha->aen[2],
                                ha->aen[3], ha->aen[4], ha->aen[5],
                                ha->aen[6], ha->aen[7], ha->aen[8]);

                        switch ((ha->aen[0] & 0xFFFF)) {
                        case 0x8011:
                                ha->link_up = 1;
                                break;

                        case 0x8012:
                                ha->link_up = 0;
                                break;

                        case 0x8130:
                                ha->link_hw_info = ha->aen[1];
                                break;

                        case 0x8131:
                                ha->link_hw_info = 0;
                                break;
                        }
                }
        }
        WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);

        return;
}

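/*
 * qls_isr
 *	Per-vector interrupt handler.  Each vector is bound to one completion
 *	queue; vector 0 additionally services mailbox/AEN events when the PI
 *	bit is set in the control status register.  A fatal error (FE) status
 *	aborts processing; otherwise the owning completion queue is drained
 *	and the interrupt is re-enabled.
 */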
void
qls_isr(void *arg)
{
        qla_ivec_t *ivec = arg;
        qla_host_t *ha;
        uint32_t status;
        uint32_t cq_idx;
        device_t dev;

        ha = ivec->ha;
        cq_idx = ivec->cq_idx;
        dev = ha->pci_dev;

        status = READ_REG32(ha, Q81_CTL_STATUS);

        if (status & Q81_CTL_STATUS_FE) {
                device_printf(dev, "%s fatal error\n", __func__);
                return;
        }

        if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
                qls_mbx_isr(ha);
        }

        status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);

        if (status & (0x1 << cq_idx))
                qls_cq_isr(ha, cq_idx);

        Q81_ENABLE_INTR(ha, cq_idx);

        return;
}