2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013-2016 Qlogic Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
42 #include "ql_inline.h"
47 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
/*
 * qla_rcv_error: fatal receive-path error handler — initiates adapter
 * recovery via the QL_INITIATE_RECOVERY() macro.
 * NOTE(review): intervening source lines are omitted from this extract;
 * the function's full signature/return handling is not visible here.
 */
51 qla_rcv_error(qla_host_t *ha)
54 QL_INITIATE_RECOVERY(ha);
/*
 * qla_rx_intr: process one normal (non-LRO) receive completion.
 * Walks the sgc->num_handles scatter/gather handles, linking the per-handle
 * mbufs into a chain (mpf = first, mpl = last), recycles the rx buffer
 * descriptors onto the SDS free list, strips any 802.1Q VLAN tag, applies
 * hardware checksum status, and hands the frame to the stack (via soft LRO
 * when enabled, else directly through ifp->if_input).
 * NOTE(review): this extract omits intervening lines (error-exit paths,
 * chain-linking statements, closing braces); comments below describe only
 * the visible statements.
 */
60 * Function: Handles normal ethernet frames received
63 qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
66 struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
67 struct ifnet *ifp = ha->ifp;
69 struct ether_vlan_header *eh;
70 uint32_t i, rem_len = 0;
72 qla_rx_ring_t *rx_ring;
/* Per-SDS soft-LRO context for this status ring. */
75 lro = &ha->hw.sds[sds_idx].lro;
/* With multiple RDS rings, r_idx is derived (derivation not visible here). */
77 if (ha->hw.num_rds_rings > 1)
80 ha->hw.rds[r_idx].count++;
82 sdsp = &ha->hw.sds[sds_idx];
83 rx_ring = &ha->rx_ring[r_idx];
/* Assemble the frame from each hardware buffer handle. */
85 for (i = 0; i < sgc->num_handles; i++) {
/* Low 15 bits of the handle index into the rx buffer array. */
86 rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
88 QL_ASSERT(ha, (rxb != NULL),
89 ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
/* QL_ERR_INJECT allows test-harness fault injection on this path. */
92 if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
94 device_printf(ha->pci_dev,
95 "%s invalid rxb[%d, %d, 0x%04x]\n",
96 __func__, sds_idx, i, sgc->handle[i]);
105 QL_ASSERT(ha, (mp != NULL),
106 ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
/* DMA sync before the CPU reads the freshly filled buffer. */
109 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
/* Recycle the rx buffer descriptor onto the SDS free list. */
112 rxb->next = sdsp->rxb_free;
113 sdsp->rxb_free = rxb;
116 if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
118 device_printf(ha->pci_dev,
119 "%s mp == NULL [%d, %d, 0x%04x]\n",
120 __func__, sds_idx, i, sgc->handle[i]);
/* First segment becomes the packet header mbuf; total length from HW. */
127 mp->m_flags |= M_PKTHDR;
128 mp->m_pkthdr.len = sgc->pkt_length;
129 mp->m_pkthdr.rcvif = ifp;
130 rem_len = mp->m_pkthdr.len;
/* Continuation segments must not carry M_PKTHDR. */
132 mp->m_flags &= ~M_PKTHDR;
135 rem_len = rem_len - mp->m_len;
/* Trim the last segment to the remaining byte count. */
139 mpl->m_len = rem_len;
141 eh = mtod(mpf, struct ether_vlan_header *);
/* VLAN decapsulation: record the tag on the mbuf, then slide the two
 * 32-bit words of the MAC header down over the 4-byte 802.1Q tag and
 * m_adj() the duplicate bytes off the front. */
143 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
144 uint32_t *data = (uint32_t *)eh;
146 mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
147 mpf->m_flags |= M_VLANTAG;
149 *(data + 3) = *(data + 2);
150 *(data + 2) = *(data + 1);
153 m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
/* Hardware verified IP + L4 checksums: mark both valid so the stack
 * skips software checksumming; otherwise clear all offload flags. */
156 if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
157 mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
158 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
159 mpf->m_pkthdr.csum_data = 0xFFFF;
161 mpf->m_pkthdr.csum_flags = 0;
164 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/* Propagate the hardware RSS hash; hash-type macro varies by OS rev. */
166 mpf->m_pkthdr.flowid = sgc->rss_hash;
168 #if __FreeBSD_version >= 1100000
169 M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
171 #if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
172 M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
174 M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
176 #endif /* #if __FreeBSD_version >= 1100000 */
/* Soft LRO when enabled: queue (new API) or tcp_lro_rx with fallback to
 * direct if_input when LRO rejects the frame. */
178 if (ha->hw.enable_soft_lro) {
180 #if (__FreeBSD_version >= 1100101)
182 tcp_lro_queue_mbuf(lro, mpf);
185 if (tcp_lro_rx(lro, mpf, 0))
186 (*ifp->if_input)(ifp, mpf);
188 #endif /* #if (__FreeBSD_version >= 1100101) */
192 (*ifp->if_input)(ifp, mpf);
/* Refill hardware rx descriptors once enough buffers are free. */
195 if (sdsp->rx_free > ha->std_replenish)
196 qla_replenish_normal_rx(ha, sdsp, r_idx);
/* Fixed TCP header size and timestamp-option size used to reconstruct the
 * total packet length of an LRO aggregate (hardware reports payload +
 * l4_offset only). */
201 #define QLA_TCP_HDR_SIZE 20
202 #define QLA_TCP_TS_OPTION_SIZE 12
/*
 * qla_lro_intr: process one hardware-LRO receive completion.
 * Rebuilds the aggregated frame's mbuf chain from the SG handles,
 * reconstructs the IP/IPv6 total-length field (hardware coalescing leaves
 * it stale), restores TH_PUSH if the aggregate carried it, strips VLAN
 * encapsulation, and injects the frame into the stack.
 * NOTE(review): extract omits intervening lines; the comment at the
 * original line 206 ("normal ethernet frames") appears copy-pasted from
 * qla_rx_intr — this function handles LRO completions.
 */
206 * Function: Handles normal ethernet frames received
209 qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
212 struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
213 struct ifnet *ifp = ha->ifp;
215 struct ether_vlan_header *eh;
216 uint32_t i, rem_len = 0, pkt_length, iplen;
218 struct ip *ip = NULL;
219 struct ip6_hdr *ip6 = NULL;
222 qla_rx_ring_t *rx_ring;
224 if (ha->hw.num_rds_rings > 1)
227 ha->hw.rds[r_idx].count++;
229 rx_ring = &ha->rx_ring[r_idx];
231 ha->hw.rds[r_idx].lro_pkt_count++;
233 sdsp = &ha->hw.sds[sds_idx];
/* Total length = payload + headers up to L4; add TCP header (plus the
 * 12-byte timestamp option when the completion flags say so). */
235 pkt_length = sgc->payload_length + sgc->l4_offset;
237 if (sgc->flags & Q8_LRO_COMP_TS) {
238 pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
240 pkt_length += QLA_TCP_HDR_SIZE;
242 ha->hw.rds[r_idx].lro_bytes += pkt_length;
/* Chain the mbufs for each SG handle (same pattern as qla_rx_intr). */
244 for (i = 0; i < sgc->num_handles; i++) {
245 rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
247 QL_ASSERT(ha, (rxb != NULL),
248 ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
251 if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
253 device_printf(ha->pci_dev,
254 "%s invalid rxb[%d, %d, 0x%04x]\n",
255 __func__, sds_idx, i, sgc->handle[i]);
264 QL_ASSERT(ha, (mp != NULL),
265 ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
268 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
/* Recycle the buffer descriptor onto the SDS free list. */
271 rxb->next = sdsp->rxb_free;
272 sdsp->rxb_free = rxb;
275 if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
277 device_printf(ha->pci_dev,
278 "%s mp == NULL [%d, %d, 0x%04x]\n",
279 __func__, sds_idx, i, sgc->handle[i]);
286 mp->m_flags |= M_PKTHDR;
287 mp->m_pkthdr.len = pkt_length;
288 mp->m_pkthdr.rcvif = ifp;
289 rem_len = mp->m_pkthdr.len;
291 mp->m_flags &= ~M_PKTHDR;
294 rem_len = rem_len - mp->m_len;
298 mpl->m_len = rem_len;
/* Locate the TCP header via the hardware-reported L4 offset. */
300 th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);
/* Restore PUSH if any coalesced segment carried it. */
302 if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
303 th->th_flags |= TH_PUSH;
/* Drop any leading bytes before the Ethernet header. */
305 m_adj(mpf, sgc->l2_offset);
307 eh = mtod(mpf, struct ether_vlan_header *);
/* VLAN decapsulation — same word-slide trick as qla_rx_intr. */
309 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
310 uint32_t *data = (uint32_t *)eh;
312 mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
313 mpf->m_flags |= M_VLANTAG;
315 *(data + 3) = *(data + 2);
316 *(data + 2) = *(data + 1);
319 m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
321 etype = ntohs(eh->evl_proto);
323 etype = ntohs(eh->evl_encap_proto);
/* Rewrite the L3 length field to cover the coalesced aggregate:
 * IPv4 ip_len includes the IP header; IPv6 ip6_plen excludes it. */
326 if (etype == ETHERTYPE_IP) {
327 ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
329 iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
332 ip->ip_len = htons(iplen);
336 M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);
338 } else if (etype == ETHERTYPE_IPV6) {
339 ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);
341 iplen = (th->th_off << 2) + sgc->payload_length;
343 ip6->ip6_plen = htons(iplen);
347 M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);
/* Replenish on the (not-visible-here) non-IP early-out path. */
352 if (sdsp->rx_free > ha->std_replenish)
353 qla_replenish_normal_rx(ha, sdsp, r_idx);
/* Hardware validated checksums for the aggregate. */
357 mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
358 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
359 mpf->m_pkthdr.csum_data = 0xFFFF;
361 mpf->m_pkthdr.flowid = sgc->rss_hash;
363 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
365 (*ifp->if_input)(ifp, mpf);
367 if (sdsp->rx_free > ha->std_replenish)
368 qla_replenish_normal_rx(ha, sdsp, r_idx);
/*
 * qla_rcv_cont_sds: walk `dcount` continuation status descriptors that
 * follow an SGL receive/LRO completion and extract the additional buffer
 * handles they carry into *handle, accumulating the count in *nhandles.
 * Each continuation descriptor packs 1..7 16-bit handles across its two
 * 64-bit data words (handles 1-4 in data[0], 5-7 in data[1]).
 * NOTE(review): extract omits intervening lines (error exits, the final
 * return); the visible error paths fire on a zero opcode or on injected
 * faults.
 */
374 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
375 uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
378 uint16_t num_handles;
379 q80_stat_desc_t *sdesc;
385 for (i = 0; i < dcount; i++) {
/* Status ring index wraps with a power-of-two mask. */
386 comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
387 sdesc = (q80_stat_desc_t *)
388 &ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
390 opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
/* A zero opcode means the continuation descriptor is not valid. */
392 if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) {
393 device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
394 __func__, (void *)sdesc->data[0],
395 (void *)sdesc->data[1]);
399 num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
401 device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
402 __func__, (void *)sdesc->data[0],
403 (void *)sdesc->data[1]);
407 if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
/* Unpack num_handles (1..7) handles from the descriptor words; the
 * cases cascade so N handles reads HANDLE1..HANDLEN. */
410 switch (num_handles) {
413 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
417 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
418 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
422 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
423 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
424 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
428 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
429 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
430 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
431 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
435 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
436 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
437 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
438 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
439 *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
443 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
444 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
445 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
446 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
447 *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
448 *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
452 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
453 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
454 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
455 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
456 *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
457 *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
458 *handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
/* Anything outside 1..7 is a hardware/descriptor error. */
462 device_printf(ha->pci_dev,
463 "%s: invalid num handles %p %p\n",
464 __func__, (void *)sdesc->data[0],
465 (void *)sdesc->data[1]);
468 ("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
469 __func__, "invalid num handles", sds_idx, num_handles,
470 (void *)sdesc->data[0],(void *)sdesc->data[1]));
/* Report the total number of handles extracted to the caller. */
475 *nhandles = *nhandles + num_handles;
/*
 * ql_rcv_isr: main receive interrupt service loop for one SDS (status)
 * ring.  Consumes up to `count` status descriptors, dispatching on the
 * descriptor opcode: single-buffer receive (RCV_PKT), scatter/gather
 * receive (SGL_RCV), and scatter/gather LRO (SGL_LRO).  Consumed
 * descriptors are zeroed, the consumer index is advanced in batches
 * (sds_cidx_thres), soft LRO is flushed, and the rx free pool is
 * replenished on exit.
 * NOTE(review): extract omits intervening lines (switch header, error
 * exits, closing braces); comments describe only visible statements.
 */
482 * Function: Main Interrupt Service Routine
485 ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
489 uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
490 volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
494 uint32_t sds_replenish_threshold = 0;
/* Mark this ring busy for the duration of the loop (cleared on exit). */
501 hw->sds[sds_idx].rcv_active = 1;
503 hw->sds[sds_idx].rcv_active = 0;
507 QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));
/* Resume from the last-consumed position. */
512 comp_idx = hw->sds[sds_idx].sdsr_next;
514 while (count-- && !ha->stop_rcv) {
516 sdesc = (q80_stat_desc_t *)
517 &hw->sds[sds_idx].sds_ring_base[comp_idx];
519 opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
/* --- Single-buffer receive completion --- */
526 case Q8_STAT_DESC_OPCODE_RCV_PKT:
530 bzero(&sgc, sizeof(qla_sgl_comp_t));
533 Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
534 sgc.rcv.num_handles = 1;
536 Q8_STAT_DESC_HANDLE((sdesc->data[0]));
537 sgc.rcv.chksum_status =
538 Q8_STAT_DESC_STATUS((sdesc->data[1]));
541 Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
543 if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
545 Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
547 qla_rx_intr(ha, &sgc.rcv, sds_idx);
/* --- Scatter/gather receive: first descriptor plus (desc_count-1)
 *     continuation descriptors carrying extra buffer handles --- */
550 case Q8_STAT_DESC_OPCODE_SGL_RCV:
553 Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));
555 if (desc_count > 1) {
/* Validate that the last descriptor of the group is a CONT
 * descriptor before trusting desc_count. */
556 c_idx = (comp_idx + desc_count -1) &
557 (NUM_STATUS_DESCRIPTORS-1);
558 sdesc0 = (q80_stat_desc_t *)
559 &hw->sds[sds_idx].sds_ring_base[c_idx];
561 if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
562 Q8_STAT_DESC_OPCODE_CONT) ||
563 QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) {
569 bzero(&sgc, sizeof(qla_sgl_comp_t));
572 Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
574 sgc.rcv.chksum_status =
575 Q8_STAT_DESC_STATUS((sdesc->data[1]));
578 Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
580 if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
582 Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
585 QL_ASSERT(ha, (desc_count <= 2) ,\
586 ("%s: [sds_idx, data0, data1]="\
587 "%d, %p, %p]\n", __func__, sds_idx,\
588 (void *)sdesc->data[0],\
589 (void *)sdesc->data[1]));
591 sgc.rcv.num_handles = 1;
593 Q8_STAT_DESC_HANDLE((sdesc->data[0]));
/* Gather the remaining handles from the continuation descriptors. */
595 if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
596 &sgc.rcv.handle[1], &nhandles)) {
598 "%s: [sds_idx, dcount, data0, data1]="
599 "[%d, %d, 0x%llx, 0x%llx]\n",
600 __func__, sds_idx, desc_count,
601 (long long unsigned int)sdesc->data[0],
602 (long long unsigned int)sdesc->data[1]);
607 sgc.rcv.num_handles += nhandles;
609 qla_rx_intr(ha, &sgc.rcv, sds_idx);
/* --- Scatter/gather hardware-LRO completion --- */
613 case Q8_STAT_DESC_OPCODE_SGL_LRO:
616 Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));
618 if (desc_count > 1) {
619 c_idx = (comp_idx + desc_count -1) &
620 (NUM_STATUS_DESCRIPTORS-1);
621 sdesc0 = (q80_stat_desc_t *)
622 &hw->sds[sds_idx].sds_ring_base[c_idx];
624 if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
625 Q8_STAT_DESC_OPCODE_CONT) ||
626 QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) {
631 bzero(&sgc, sizeof(qla_sgl_comp_t));
633 sgc.lro.payload_length =
634 Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));
637 Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
639 sgc.lro.num_handles = 1;
641 Q8_STAT_DESC_HANDLE((sdesc->data[0]));
/* Timestamp-option and PUSH-bit flags feed qla_lro_intr's length
 * reconstruction and TH_PUSH restoration. */
643 if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
644 sgc.lro.flags |= Q8_LRO_COMP_TS;
646 if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
647 sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;
650 Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
652 Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));
654 if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
656 Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
659 QL_ASSERT(ha, (desc_count <= 7) ,\
660 ("%s: [sds_idx, data0, data1]="\
661 "[%d, 0x%llx, 0x%llx]\n",\
663 (long long unsigned int)sdesc->data[0],\
664 (long long unsigned int)sdesc->data[1]));
666 if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
667 desc_count, &sgc.lro.handle[1], &nhandles)) {
669 "%s: [sds_idx, data0, data1]="\
670 "[%d, 0x%llx, 0x%llx]\n",\
672 (long long unsigned int)sdesc->data[0],\
673 (long long unsigned int)sdesc->data[1]);
679 sgc.lro.num_handles += nhandles;
/* On LRO processing failure, dump both the head and (if present)
 * the tail descriptor for diagnosis. */
681 if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
683 "%s: [sds_idx, data0, data1]="\
684 "[%d, 0x%llx, 0x%llx]\n",\
686 (long long unsigned int)sdesc->data[0],\
687 (long long unsigned int)sdesc->data[1]);
689 "%s: [comp_idx, c_idx, dcount, nhndls]="\
690 "[%d, %d, %d, %d]\n",\
691 __func__, comp_idx, c_idx, desc_count,
692 sgc.lro.num_handles);
693 if (desc_count > 1) {
695 "%s: [sds_idx, data0, data1]="\
696 "[%d, 0x%llx, 0x%llx]\n",\
698 (long long unsigned int)sdesc0->data[0],\
699 (long long unsigned int)sdesc0->data[1]);
/* Unknown opcode: log and fall through to descriptor consumption. */
707 device_printf(dev, "%s: default 0x%llx!\n", __func__,
708 (long long unsigned int)sdesc->data[0]);
715 sds_replenish_threshold += desc_count;
/* Zero each consumed descriptor so stale completions are never
 * re-processed, advancing comp_idx with wraparound. */
718 while (desc_count--) {
719 sdesc->data[0] = 0ULL;
720 sdesc->data[1] = 0ULL;
721 comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
722 sdesc = (q80_stat_desc_t *)
723 &hw->sds[sds_idx].sds_ring_base[comp_idx];
/* Batch consumer-index updates to limit register writes. */
726 if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
727 sds_replenish_threshold = 0;
728 if (hw->sds[sds_idx].sdsr_next != comp_idx) {
729 QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
732 hw->sds[sds_idx].sdsr_next = comp_idx;
/* Flush accumulated soft-LRO state; API differs by FreeBSD version. */
736 if (ha->hw.enable_soft_lro) {
737 struct lro_ctrl *lro;
739 lro = &ha->hw.sds[sds_idx].lro;
741 #if (__FreeBSD_version >= 1100101)
743 tcp_lro_flush_all(lro);
746 struct lro_entry *queued;
748 while ((!SLIST_EMPTY(&lro->lro_active))) {
749 queued = SLIST_FIRST(&lro->lro_active);
750 SLIST_REMOVE_HEAD(&lro->lro_active, next);
751 tcp_lro_flush(lro, queued);
754 #endif /* #if (__FreeBSD_version >= 1100101) */
759 goto ql_rcv_isr_exit;
/* Final consumer-index sync for any remainder below the batch size. */
761 if (hw->sds[sds_idx].sdsr_next != comp_idx) {
762 QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
763 hw->sds[sds_idx].sdsr_next = comp_idx;
765 if (ha->hw.num_rds_rings > 1)
768 sdsp = &ha->hw.sds[sds_idx];
770 if (sdsp->rx_free > ha->std_replenish)
771 qla_replenish_normal_rx(ha, sdsp, r_idx);
/* Peek at the next descriptor (presumably to decide whether more work
 * remains — the consuming code is not visible in this extract). */
774 sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
775 opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
781 hw->sds[sds_idx].rcv_active = 0;
/*
 * ql_mbx_isr: firmware mailbox interrupt handler.  Validates the mailbox
 * control register, reads the AEN (asynchronous event notification) code
 * from FW mailbox 0, and dispatches: 0x8001 link-status change (updates
 * cached link speed/state, module type, duplex/autoneg, loopback and
 * fault info, and notifies the stack via if_link_state_change); other
 * visible cases latch or dump the remaining mailbox registers (idc
 * events, SFP insertion/removal).  Clears the mailbox control register
 * and unmasks the mailbox interrupt on the way out.
 * NOTE(review): extract omits intervening lines (case labels/breaks,
 * local declarations such as `ombx`); NOTE(review): the "sfp removed]"
 * message at original line 899 has a stray ']' — runtime string, left
 * untouched here.
 */
787 ql_mbx_isr(void *arg)
791 uint32_t prev_link_state;
796 device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
/* Low 2 bits of the mailbox control register must read 0x1; otherwise
 * just unmask the interrupt and bail. */
800 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
801 if ((data & 0x3) != 0x1) {
802 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
806 data = READ_REG32(ha, Q8_FW_MBOX0);
/* AEN codes have 0x8 in the top nibble of the low 16 bits. */
808 if ((data & 0xF000) != 0x8000)
811 data = data & 0xFFFF;
815 case 0x8001: /* It's an AEN */
/* Link event: cable OUI/length, link speed, then link state. */
817 ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
819 data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
820 ha->hw.cable_length = data & 0xFFFF;
823 ha->hw.link_speed = data & 0xFFF;
825 data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
827 prev_link_state = ha->hw.link_up;
/* Normalize link byte to a boolean; release-store so readers on
 * other CPUs observe the update. */
829 data = (((data & 0xFF) == 0) ? 0 : 1);
830 atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data);
832 device_printf(ha->pci_dev,
833 "%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n",
834 __func__, data, prev_link_state);
/* Only notify the stack on an actual transition. */
836 if (prev_link_state != ha->hw.link_up) {
838 if_link_state_change(ha->ifp, LINK_STATE_UP);
840 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
/* Per-byte fields packed into the same mailbox word. */
844 ha->hw.module_type = ((data >> 8) & 0xFF);
845 ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
846 ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
848 data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
849 ha->hw.loopback_mode = data & 0x03;
851 ha->hw.link_faults = (data >> 3) & 0xFF;
856 device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
/* Latch mailbox words for deferred processing of this AEN. */
862 ha->hw.aen_mb0 = 0x8101;
863 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
864 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
865 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
866 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
867 device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0%08x 0x%08x]\n",
868 __func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2,
869 ha->hw.aen_mb3, ha->hw.aen_mb4);
873 /* for now just dump the registers */
877 ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
878 ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
879 ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
880 ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
881 ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
883 device_printf(ha->pci_dev, "%s: "
884 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
885 __func__, data, ombx[0], ombx[1], ombx[2],
892 /* sfp insertion aen */
893 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
894 __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
898 /* sfp removal aen */
899 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
906 ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
907 ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
908 ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
910 device_printf(ha->pci_dev, "%s: "
911 "0x%08x 0x%08x 0x%08x 0x%08x \n",
912 __func__, data, ombx[0], ombx[1], ombx[2]);
917 device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
/* Acknowledge the mailbox and re-enable the mailbox interrupt. */
920 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
921 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
/*
 * qla_replenish_normal_rx: refill the hardware receive descriptor ring
 * r_idx from the SDS free-buffer list.  For each free rxb, attach a fresh
 * mbuf (ql_get_mbuf) and post its physical address into the rx ring; on
 * mbuf allocation failure the rxb is pushed back onto the free list.
 * The producer index is written to hardware every rds_pidx_thres
 * descriptors, with a final update for any remainder.
 * NOTE(review): extract omits intervening lines (loop header consuming
 * `count`, index increments, closing braces).
 */
927 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
930 int count = sdsp->rx_free;
934 /* we can play with this value via a sysctl */
935 uint32_t replenish_thresh = ha->hw.rds_pidx_thres;
937 rdesc = &ha->hw.rds[r_idx];
/* Remember the starting producer position to detect pending updates. */
939 rx_next = rdesc->rx_next;
/* Pop a buffer descriptor off the SDS free list. */
942 rxb = sdsp->rxb_free;
947 sdsp->rxb_free = rxb->next;
950 if (ql_get_mbuf(ha, rxb, NULL) == 0) {
/* Post the DMA address + length of the new mbuf into slot rx_in. */
951 qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
953 rxb->paddr, (rxb->m_head)->m_pkthdr.len);
/* Wrap both ring indices at NUM_RX_DESCRIPTORS. */
955 if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
958 if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
961 device_printf(ha->pci_dev,
962 "%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
963 __func__, r_idx, rdesc->rx_in, rxb->handle);
/* Allocation failed: return the rxb to the free list. */
966 rxb->next = sdsp->rxb_free;
967 sdsp->rxb_free = rxb;
/* Periodically publish the producer index so hardware can start using
 * the replenished buffers before the loop finishes. */
972 if (replenish_thresh-- == 0) {
973 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
975 rx_next = rdesc->rx_next;
976 replenish_thresh = ha->hw.rds_pidx_thres;
/* Final producer update for any descriptors posted since the last one. */
980 if (rx_next != rdesc->rx_next) {
981 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
/*
 * Fragment of the per-ring MSI-X interrupt handler (function header not
 * visible in this extract — presumably ql_isr, taking a qla_ivec_t *).
 * Validates the SDS index, counts the interrupt, and defers processing to
 * the fast-path taskqueue when the interface is running.
 */
989 qla_ivec_t *ivec = arg;
/* Ignore vectors beyond the configured number of SDS rings. */
1000 if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
1003 fp = &ha->tx_fp[idx];
1004 hw->sds[idx].intr_count++;
/* Defer the actual ring work to the fast-path task — only when the
 * taskqueue exists and the interface is up and running. */
1006 if ((fp->fp_taskqueue != NULL) &&
1007 (ifp->if_drv_flags & IFF_DRV_RUNNING))
1008 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);