2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011-2013 Qlogic Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
42 #include "qla_inline.h"
/*
 * Forward declarations for the per-status-ring receive-buffer
 * replenishment helpers defined later in this file (normal-sized
 * and jumbo-sized receive rings respectively).
 */
47 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
48 static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);
/*
 * qla_rx_intr
 *
 * Processes one receive completion pulled off a status (SDS) ring and
 * hands the frame to the network stack (directly or via LRO).
 *
 * NOTE(review): this excerpt is a lossy extraction -- the leading numeral
 * on each line is an original-file line number left by the extractor, and
 * several interior lines (braces, returns, the ring==normal/jumbo branch
 * structure, the mbuf fetch) are missing.  Comments below describe only
 * the statements that are visible; control-flow claims are hedged.
 */
52 * Function: Handles normal ethernet frames received
55 qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
58 uint32_t idx, length, status, ring;
61 struct ifnet *ifp = ha->ifp;
63 struct ether_vlan_header *eh;
/* Status-ring state for this interrupt vector. */
65 sdsp = &ha->hw.sds[sds_idx];
/*
 * Decode the 64-bit status descriptor: receive-ring type, buffer
 * handle/index, total frame length, and hardware status bits.
 */
67 ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
68 idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
69 length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
70 status = (uint32_t)Q8_STAT_DESC_STATUS(data);
/*
 * Validate index/length against the normal ring: the handle must fall
 * inside the descriptor array and the frame must fit a cluster.
 * (The surrounding ring-type dispatch is not visible in this excerpt.)
 */
73 if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
74 device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
75 " len[0x%08x] invalid\n",
76 __func__, ring, idx, length);
/* Same validation for the jumbo ring (9K buffers). */
80 if ((idx >= NUM_RX_JUMBO_DESCRIPTORS)||(length > MJUM9BYTES)) {
81 device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
82 " len[0x%08x] invalid\n",
83 __func__, ring, idx, length);
/* Look up the driver's receive-buffer bookkeeping for this handle. */
89 rxb = &ha->rx_buf[idx];
91 rxb = &ha->rx_jbuf[idx];
93 QL_ASSERT((rxb != NULL),\
94 ("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",\
95 __func__, ring, idx, sds_idx));
/* mp is presumably rxb->m_head, taken on a missing line -- verify. */
99 QL_ASSERT((mp != NULL),\
100 ("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",\
101 __func__, ring, idx, rxb, sds_idx));
/* Make the DMA'd frame contents visible to the CPU before reading. */
103 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
/*
 * Return the consumed buffer descriptor to this ring's per-SDS free
 * list so the replenish path below can post a fresh mbuf for it.
 */
107 rxb->next = sdsp->rxb_free;
108 sdsp->rxb_free = rxb;
/* Jumbo-ring equivalent of the free-list push above. */
112 rxb->next = sdsp->rxjb_free;
113 sdsp->rxjb_free = rxb;
/* Fill in the packet header before handing the mbuf upward. */
118 mp->m_pkthdr.len = length;
119 mp->m_pkthdr.rcvif = ifp;
121 eh = mtod(mp, struct ether_vlan_header *);
/*
 * Hardware leaves the 802.1Q tag in the frame: record it in the mbuf,
 * then slide the MAC addresses up over the 4-byte tag (some of the
 * word copies are on lines not visible here) and trim the tag off.
 */
123 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
124 uint32_t *data = (uint32_t *)eh;
126 mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
127 mp->m_flags |= M_VLANTAG;
129 *(data + 3) = *(data + 2);
130 *(data + 2) = *(data + 1);
133 m_adj(mp, ETHER_VLAN_ENCAP_LEN);
/* Propagate the hardware IP-checksum verdict to the stack. */
136 if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
137 mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
139 mp->m_pkthdr.csum_flags = 0;
/*
 * Try LRO first when it is active; on any other outcome the frame is
 * delivered straight to the interface input routine.
 */
142 if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
143 /* LRO packet has been successfully queued */
145 (*ifp->if_input)(ifp, mp);
/*
 * Once enough buffers have accumulated on the free lists, post fresh
 * mbufs back to the hardware receive rings.
 */
148 if (sdsp->rx_free > std_replenish)
149 qla_replenish_normal_rx(ha, sdsp);
151 if (sdsp->rxj_free > jumbo_replenish)
152 qla_replenish_jumbo_rx(ha, sdsp);
/*
 * qla_replenish_jumbo_rx
 *
 * Drains this status-ring's jumbo free list, attaching a fresh 9K mbuf
 * to each recycled buffer and re-posting it to the jumbo receive ring.
 * Bumps the hardware producer index only if any descriptor was posted.
 *
 * NOTE(review): lossy excerpt -- loop braces, the early return taken
 * when the trylock fails, and the index increments are on lines not
 * visible here.  Leading numerals are extractor artifacts, not code.
 */
158 qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
/* Replenish at most one batch per call (tunable batch size). */
161 int count = jumbo_replenish;
/* Never spin in the receive path; skip replenish if the lock is busy. */
164 if (!mtx_trylock(&ha->rxj_lock))
/* Remember the producer index so we only ring the doorbell on change. */
167 rxj_next = ha->hw.rxj_next;
/* Walk the per-SDS jumbo free list built up by qla_rx_intr(). */
170 rxb = sdsp->rxjb_free;
175 sdsp->rxjb_free = rxb->next;
/*
 * Attach a new jumbo mbuf; on success write the receive descriptor
 * (handle + DMA address + length) at the current fill position.
 */
178 if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
179 qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
180 ha->hw.rxj_in, rxb->handle, rxb->paddr,
181 (rxb->m_head)->m_pkthdr.len);
/* Wrap the fill and producer indices at the end of the ring. */
183 if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
186 if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
/*
 * mbuf allocation failed: report it and push the buffer back on the
 * free list so a later replenish pass can retry it.
 */
189 device_printf(ha->pci_dev,
190 "%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
191 __func__, ha->hw.rxj_in, rxb->handle);
194 rxb->next = sdsp->rxjb_free;
195 sdsp->rxjb_free = rxb;
/* Only touch the hardware doorbell if we actually posted buffers. */
202 if (rxj_next != ha->hw.rxj_next) {
203 QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
205 mtx_unlock(&ha->rxj_lock);
/*
 * qla_replenish_normal_rx
 *
 * Normal-ring counterpart of qla_replenish_jumbo_rx(): recycles
 * cluster-sized buffers from this status-ring's free list back to the
 * standard receive ring, then advances the hardware producer index if
 * anything was posted.
 *
 * NOTE(review): lossy excerpt -- loop braces, the early return on
 * trylock failure, and the index increments are on lines not visible
 * here.  Leading numerals are extractor artifacts, not code.
 */
209 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
/* Replenish at most one batch per call (tunable batch size). */
212 int count = std_replenish;
/* Never block the receive path on the replenish lock. */
215 if (!mtx_trylock(&ha->rx_lock))
/* Snapshot the producer index to detect whether we posted anything. */
218 rx_next = ha->hw.rx_next;
/* Walk the per-SDS normal free list built up by qla_rx_intr(). */
221 rxb = sdsp->rxb_free;
226 sdsp->rxb_free = rxb->next;
/*
 * Attach a new cluster mbuf; on success program the receive
 * descriptor at the current fill position.
 */
229 if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
230 qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
231 ha->hw.rx_in, rxb->handle, rxb->paddr,
232 (rxb->m_head)->m_pkthdr.len);
/* Wrap the fill and producer indices at the end of the ring. */
234 if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
237 if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
/* Allocation failed: log and return the buffer to the free list. */
240 device_printf(ha->pci_dev,
241 "%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
242 __func__, ha->hw.rx_in, rxb->handle);
245 rxb->next = sdsp->rxb_free;
246 sdsp->rxb_free = rxb;
/* Ring the doorbell only if the producer index actually moved. */
253 if (rx_next != ha->hw.rx_next) {
254 QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
256 mtx_unlock(&ha->rx_lock);
/*
 * qla_rcv_isr
 *
 * Core receive service loop for one status (SDS) ring: consumes up to
 * `count` host-owned status descriptors, dispatches each completion,
 * hands the descriptors back to firmware, flushes LRO, and advances the
 * hardware consumer index.  Return value / early-exit paths are on
 * lines not visible in this lossy excerpt; leading numerals are
 * extractor artifacts, not code.
 */
261 * Function: Main Interrupt Service Routine
264 qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
268 uint32_t comp_idx, desc_count;
269 q80_stat_desc_t *sdesc;
270 struct lro_ctrl *lro;
/*
 * Mark this ring busy; if a shutdown is in progress, back out
 * immediately (the stop path presumably waits on rcv_active -- verify).
 */
276 hw->sds[sds_idx].rcv_active = 1;
277 if (ha->flags.stop_rcv) {
278 hw->sds[sds_idx].rcv_active = 0;
282 QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));
/* Resume from where the previous pass left off. */
287 comp_idx = hw->sds[sds_idx].sdsr_next;
288 lro = &hw->sds[sds_idx].lro;
291 sdesc = (q80_stat_desc_t *)
292 &hw->sds[sds_idx].sds_ring_base[comp_idx];
/* Stop when the next descriptor still belongs to firmware. */
294 if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
295 Q8_STAT_DESC_OWNER_HOST) {
296 QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n",
297 __func__, (void *)sdesc->data[0], comp_idx));
/* A completion may span several descriptors. */
301 desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));
/* Dispatch on completion opcode; receive frames go to qla_rx_intr(). */
303 switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {
304 case Q8_STAT_DESC_OPCODE_RCV_PKT:
305 case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
306 qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
/* Unknown opcode: log it (descriptors are still returned below). */
311 device_printf(dev, "%s: default 0x%llx!\n", __func__,
312 (long long unsigned int)sdesc->data[0]);
/*
 * Give each consumed descriptor back to firmware and advance the
 * consumer index, wrapping with the power-of-two ring mask.
 */
316 while (desc_count--) {
318 Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
319 comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
320 sdesc = (q80_stat_desc_t *)
321 &hw->sds[sds_idx].sds_ring_base[comp_idx];
/* Push any aggregated LRO segments up the stack. */
325 tcp_lro_flush_all(lro);
/* Tell the hardware how far we consumed, if we consumed anything. */
327 if (hw->sds[sds_idx].sdsr_next != comp_idx) {
328 QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
330 hw->sds[sds_idx].sdsr_next = comp_idx;
/*
 * On ring 0, peek at the next descriptor: if more work is already
 * pending, the (not-visible) code presumably signals the caller to
 * reschedule rather than re-enable interrupts -- verify.
 */
332 sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
333 if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
334 Q8_STAT_DESC_OWNER_HOST)) {
338 hw->sds[sds_idx].rcv_active = 0;
/*
 * Interrupt handler for one receive vector (function signature is on a
 * line missing from this lossy excerpt; presumably qla_isr -- verify).
 * Maps the IRQ resource id to a status-ring index, kicks the transmit
 * completion task, services the ring, and either defers further work to
 * the per-vector taskqueue or re-enables the interrupt.
 * Leading numerals are extractor artifacts, not code.
 */
345 qla_ivec_t *ivec = arg;
/* IRQ rids are 1-based; status-ring indices are 0-based. */
351 sds_idx = ivec->irq_rid - 1;
353 if (sds_idx >= ha->hw.num_sds_rings) {
354 device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
/* Let the transmit task reap completed transmits. */
361 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
/* Service up to rcv_pkt_thres completions directly in the handler. */
363 ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);
366 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
/*
 * More work pending (branch condition not visible): hand the rest to
 * the per-vector receive task instead of looping in interrupt context.
 */
369 taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
370 &ha->irq_vec[sds_idx].rcv_task);
/* Otherwise the ring is drained; unmask the interrupt. */
372 QL_ENABLE_INTERRUPTS(ha, sds_idx);
/*
 * qla_rcv
 *
 * Taskqueue continuation of the interrupt handler: keeps servicing a
 * status ring outside interrupt context until it drains, then
 * re-enables the vector's interrupt.  Loop structure and the tail past
 * the visible lines are missing from this lossy excerpt; leading
 * numerals are extractor artifacts, not code.
 */
377 qla_rcv(void *context, int pending)
379 qla_ivec_t *ivec = context;
/* IRQ rids are 1-based; status-ring indices are 0-based. */
390 sds_idx = ivec->irq_rid - 1;
/*
 * Kick the transmit task when either transmit completions are pending
 * (hardware consumer index moved) or packets are queued for send.
 */
395 if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
396 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
397 } else if ((ifp->if_snd.ifq_head != NULL) &&
399 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
/* Service a (deferred-context) batch of receive completions. */
402 ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
406 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
/* Ring drained: unmask this vector's interrupt. */
408 QL_ENABLE_INTERRUPTS(ha, sds_idx);