/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"
static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
        struct lro_ctrl *lro)
{
        uint32_t idx, length, status, ring;
        qla_rx_buf_t *rxb;
        struct mbuf *mp;
        struct ifnet *ifp = ha->ifp;
        qla_sds_t *sdsp;
        struct ether_vlan_header *eh;

        sdsp = &ha->hw.sds[sds_idx];
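
        /*
         * Decode the 64-bit status descriptor word: receive ring
         * (normal vs. jumbo), buffer handle, total frame length and
         * checksum status.
         */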
        ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
        idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
        length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
        status = (uint32_t)Q8_STAT_DESC_STATUS(data);
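
        /*
         * Validate the handle and length against the ring this
         * descriptor claims before touching any buffer state.
         */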
        if (ring == 0) {
                if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
                        device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
                                " len[0x%08x] invalid\n",
                                __func__, ring, idx, length);
                        return;
                }
        } else {
                if ((idx >= NUM_RX_JUMBO_DESCRIPTORS)||(length > MJUM9BYTES)) {
                        device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
                                " len[0x%08x] invalid\n",
                                __func__, ring, idx, length);
                        return;
                }
        }

        if (ring == 0)
                rxb = &ha->rx_buf[idx];
        else
                rxb = &ha->rx_jbuf[idx];
        QL_ASSERT((rxb != NULL),\
                ("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",\
                __func__, ring, idx, sds_idx));

        mp = rxb->m_head;

        QL_ASSERT((mp != NULL),\
                ("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",\
                __func__, ring, idx, rxb, sds_idx));

        bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(ha->rx_tag, rxb->map);
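
        /*
         * The mbuf is handed up the stack; return the receive buffer
         * to the per-SDS free list so it can be refitted with a fresh
         * mbuf during replenish.
         */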
        if (ring == 0) {
                rxb->next = sdsp->rxb_free;
                sdsp->rxb_free = rxb;
                sdsp->rx_free++;
        } else {
                rxb->next = sdsp->rxjb_free;
                sdsp->rxjb_free = rxb;
                sdsp->rxj_free++;
        }

        mp->m_len = length;
        mp->m_pkthdr.len = length;
        mp->m_pkthdr.rcvif = ifp;
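
        /*
         * If the frame carries an 802.1Q tag, record the tag in the
         * mbuf packet header and strip the 4-byte encapsulation by
         * shifting the MAC addresses up in place.
         */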
        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                uint32_t *data = (uint32_t *)eh;

                mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
                mp->m_flags |= M_VLANTAG;

                *(data + 3) = *(data + 2);
                *(data + 2) = *(data + 1);
                *(data + 1) = *data;

                m_adj(mp, ETHER_VLAN_ENCAP_LEN);
        }
        if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
                mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
        } else {
                mp->m_pkthdr.csum_flags = 0;
        }
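
        /*
         * Try LRO aggregation first; if the mbuf is not consumed by
         * LRO, hand it straight to the stack.
         */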
        if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
                /* LRO packet has been successfully queued */
        } else {
                (*ifp->if_input)(ifp, mp);
        }

        if (sdsp->rx_free > std_replenish)
                qla_replenish_normal_rx(ha, sdsp);

        if (sdsp->rxj_free > jumbo_replenish)
                qla_replenish_jumbo_rx(ha, sdsp);

        return;
}
static void
qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
        qla_rx_buf_t *rxb;
        int count = jumbo_replenish;
        uint32_t rxj_next;

        if (!mtx_trylock(&ha->rxj_lock))
                return;

        rxj_next = ha->hw.rxj_next;
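
        /*
         * Refill up to jumbo_replenish descriptors from the free
         * list; mtx_trylock above keeps the receive path from ever
         * blocking here.
         */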
        while (count--) {
                rxb = sdsp->rxjb_free;

                if (rxb == NULL)
                        break;

                sdsp->rxjb_free = rxb->next;
                sdsp->rxj_free--;
                if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
                        qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
                                ha->hw.rxj_in, rxb->handle, rxb->paddr,
                                (rxb->m_head)->m_pkthdr.len);
                        ha->hw.rxj_in++;
                        if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
                                ha->hw.rxj_in = 0;
                        ha->hw.rxj_next++;
                        if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
                                ha->hw.rxj_next = 0;
                } else {
                        device_printf(ha->pci_dev,
                                "%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
                                __func__, ha->hw.rxj_in, rxb->handle);

                        rxb->m_head = NULL;
                        rxb->next = sdsp->rxjb_free;
                        sdsp->rxjb_free = rxb;
                        sdsp->rxj_free++;

                        break;
                }
        }
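
        /* Notify the hardware only if descriptors were actually posted. */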
        if (rxj_next != ha->hw.rxj_next) {
                QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
        }
        mtx_unlock(&ha->rxj_lock);
}
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
        qla_rx_buf_t *rxb;
        int count = std_replenish;
        uint32_t rx_next;

        if (!mtx_trylock(&ha->rx_lock))
                return;

        rx_next = ha->hw.rx_next;
        while (count--) {
                rxb = sdsp->rxb_free;

                if (rxb == NULL)
                        break;

                sdsp->rxb_free = rxb->next;
                sdsp->rx_free--;
                if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
                        qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
                                ha->hw.rx_in, rxb->handle, rxb->paddr,
                                (rxb->m_head)->m_pkthdr.len);
                        ha->hw.rx_in++;
                        if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
                                ha->hw.rx_in = 0;
                        ha->hw.rx_next++;
                        if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
                                ha->hw.rx_next = 0;
                } else {
                        device_printf(ha->pci_dev,
                                "%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
                                __func__, ha->hw.rx_in, rxb->handle);

                        rxb->m_head = NULL;
                        rxb->next = sdsp->rxb_free;
                        sdsp->rxb_free = rxb;
                        sdsp->rx_free++;

                        break;
                }
        }
        if (rx_next != ha->hw.rx_next) {
                QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
        }
        mtx_unlock(&ha->rx_lock);
}
/*
 * Name: qla_rcv_isr
 * Function: Main Interrupt Service Routine
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
        device_t dev;
        qla_hw_t *hw;
        uint32_t comp_idx, desc_count;
        q80_stat_desc_t *sdesc;
        struct lro_ctrl *lro;
        uint32_t ret = 0;

        dev = ha->pci_dev;
        hw = &ha->hw;

        hw->sds[sds_idx].rcv_active = 1;
        if (ha->flags.stop_rcv) {
                hw->sds[sds_idx].rcv_active = 0;
                return 0;
        }
        QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));

        /*
         * receive interrupts
         */
        comp_idx = hw->sds[sds_idx].sdsr_next;
        lro = &hw->sds[sds_idx].lro;

        while (count--) {
                sdesc = (q80_stat_desc_t *)
                                &hw->sds[sds_idx].sds_ring_base[comp_idx];

                if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
                        Q8_STAT_DESC_OWNER_HOST) {
                        QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n",
                                __func__, (void *)sdesc->data[0], comp_idx));
                        break;
                }
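
                /*
                 * Dispatch on the descriptor opcode; received packets,
                 * including SYN offload completions, are handed to
                 * qla_rx_intr().
                 */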
                desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));

                switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {
                case Q8_STAT_DESC_OPCODE_RCV_PKT:
                case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
                        qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
                        break;

                default:
                        device_printf(dev, "%s: default 0x%llx!\n", __func__,
                                (long long unsigned int)sdesc->data[0]);
                        break;
                }
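
                /*
                 * Return the consumed descriptor(s) to firmware
                 * ownership and advance the completion index.
                 */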
                while (desc_count--) {
                        sdesc->data[0] =
                                Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
                        comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
                        sdesc = (q80_stat_desc_t *)
                                &hw->sds[sds_idx].sds_ring_base[comp_idx];
                }
        }
        tcp_lro_flush_all(lro);

        if (hw->sds[sds_idx].sdsr_next != comp_idx) {
                QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
        }
        hw->sds[sds_idx].sdsr_next = comp_idx;
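
        /*
         * If ring 0 already has another host-owned descriptor pending,
         * return non-zero so the caller reschedules the receive task
         * instead of re-enabling the interrupt.
         */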
        sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
        if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
                Q8_STAT_DESC_OWNER_HOST)) {
                ret = -1;
        }

        hw->sds[sds_idx].rcv_active = 0;
        return (ret);
}
void
qla_isr(void *arg)
{
        qla_ivec_t *ivec = arg;
        qla_host_t *ha;
        uint32_t sds_idx;
        uint32_t ret;

        ha = ivec->ha;
        sds_idx = ivec->irq_rid - 1;

        if (sds_idx >= ha->hw.num_sds_rings) {
                device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
                        sds_idx);
                return;
        }
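
        /*
         * Ring 0 also covers transmit completion processing; kick the
         * tx task both before and after servicing receives.
         */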
        if (sds_idx == 0)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

        ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);

        if (sds_idx == 0)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

        if (ret) {
                taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
                        &ha->irq_vec[sds_idx].rcv_task);
        } else {
                QL_ENABLE_INTERRUPTS(ha, sds_idx);
        }
}
void
qla_rcv(void *context, int pending)
{
        qla_ivec_t *ivec = context;
        qla_host_t *ha;
        device_t dev;
        qla_hw_t *hw;
        uint32_t sds_idx;
        uint32_t ret;
        struct ifnet *ifp;

        ha = ivec->ha;
        dev = ha->pci_dev;
        hw = &ha->hw;
        sds_idx = ivec->irq_rid - 1;
        ifp = ha->ifp;
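
        /*
         * Keep servicing the ring until qla_rcv_isr() reports no more
         * pending work; on ring 0 also kick the transmit task when
         * completions or queued frames are outstanding.
         */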
        do {
                if (sds_idx == 0) {
                        if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
                                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                        } else if ((ifp->if_snd.ifq_head != NULL) &&
                                        QL_RUNNING(ifp)) {
                                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                        }
                }
                ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
        } while (ret);
        if (sds_idx == 0)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

        QL_ENABLE_INTERRUPTS(ha, sds_idx);
}