2 * Copyright (c) 2013-2016 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
45 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
/*
 * qla_rcv_error
 *	Latch a fatal receive-path error: stop all further receive
 *	processing and flag the adapter for recovery/reset.
 *	NOTE(review): return-type line and closing brace are not visible
 *	in this view; body shown is the complete visible logic.
 */
49 qla_rcv_error(qla_host_t *ha)
/* tell the receive path (ql_rcv_isr and friends) to drop out of their loops */
51 ha->flags.stop_rcv = 1;
/* ask the watchdog/taskqueue to initiate adapter recovery */
52 ha->qla_initiate_recovery = 1;
58 * Function: Handles normal ethernet frames received
/*
 * qla_rx_intr
 *	Process one normal (non-LRO) receive completion described by the
 *	scatter/gather context 'sgc' on status ring 'sds_idx': reclaim the
 *	rx buffers named by sgc->handle[], chain their mbufs into a packet,
 *	strip/record the VLAN tag, set checksum-offload and RSS metadata,
 *	then hand the packet to soft LRO or directly to the stack.
 *	NOTE(review): several lines of this body are not visible in this
 *	view (original line numbering is discontinuous) — mbuf chaining and
 *	error-path cleanup statements are elided here.
 */
61 qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
64 struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
65 struct ifnet *ifp = ha->ifp;
67 struct ether_vlan_header *eh;
68 uint32_t i, rem_len = 0;
70 qla_rx_ring_t *rx_ring;
/* per-status-ring software LRO context */
73 lro = &ha->hw.sds[sds_idx].lro;
/* map status ring to an rx ring; with a single rds ring r_idx stays 0 */
75 if (ha->hw.num_rds_rings > 1)
78 ha->hw.rds[r_idx].count++;
80 sdsp = &ha->hw.sds[sds_idx];
81 rx_ring = &ha->rx_ring[r_idx];
/* walk every buffer handle that makes up this completion */
83 for (i = 0; i < sgc->num_handles; i++) {
/* low 15 bits of the handle index into the rx buffer array */
84 rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
86 QL_ASSERT(ha, (rxb != NULL),
87 ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
/* bad handle (or injected fault): log and bail to error handling */
90 if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
92 device_printf(ha->pci_dev,
93 "%s invalid rxb[%d, %d, 0x%04x]\n",
94 __func__, sds_idx, i, sgc->handle[i]);
103 QL_ASSERT(ha, (mp != NULL),
104 ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
/* make DMA'd data visible to the CPU before touching the mbuf */
107 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
/* return the rx buffer descriptor to the per-sds free list for replenish */
110 rxb->next = sdsp->rxb_free;
111 sdsp->rxb_free = rxb;
114 if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
116 device_printf(ha->pci_dev,
117 "%s mp == NULL [%d, %d, 0x%04x]\n",
118 __func__, sds_idx, i, sgc->handle[i]);
/* first segment carries the packet header; later segments do not */
125 mp->m_flags |= M_PKTHDR;
126 mp->m_pkthdr.len = sgc->pkt_length;
127 mp->m_pkthdr.rcvif = ifp;
128 rem_len = mp->m_pkthdr.len;
130 mp->m_flags &= ~M_PKTHDR;
133 rem_len = rem_len - mp->m_len;
/* trim the last segment to the exact remaining length */
137 mpl->m_len = rem_len;
139 eh = mtod(mpf, struct ether_vlan_header *);
/* hardware left the 802.1Q tag in-line: record it in the pkthdr ... */
141 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
142 uint32_t *data = (uint32_t *)eh;
144 mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
145 mpf->m_flags |= M_VLANTAG;
/* ... then slide the MAC addresses forward 4 bytes over the tag */
147 *(data + 3) = *(data + 2);
148 *(data + 2) = *(data + 1);
/* and drop the now-duplicated 4-byte encapsulation from the front */
151 m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
/* hardware verified IP + L4 checksums: tell the stack to skip them */
154 if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
155 mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
156 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
157 mpf->m_pkthdr.csum_data = 0xFFFF;
159 mpf->m_pkthdr.csum_flags = 0;
/* RSS hash steers this flow; M_FLOWID marks it valid */
164 mpf->m_pkthdr.flowid = sgc->rss_hash;
165 mpf->m_flags |= M_FLOWID;
167 if (ha->hw.enable_soft_lro) {
169 #if (__FreeBSD_version >= 1100101)
/* newer LRO API: queue now, flushed later in ql_rcv_isr */
171 tcp_lro_queue_mbuf(lro, mpf);
/* older API: tcp_lro_rx() nonzero means "not coalesced" — input directly */
174 if (tcp_lro_rx(lro, mpf, 0))
175 (*ifp->if_input)(ifp, mpf);
177 #endif /* #if (__FreeBSD_version >= 1100101) */
/* soft LRO disabled: hand the frame straight to the stack */
181 (*ifp->if_input)(ifp, mpf);
/* top up the rx ring if enough buffers have been reclaimed */
184 if (sdsp->rx_free > ha->std_replenish)
185 qla_replenish_normal_rx(ha, sdsp, r_idx);
190 #define QLA_TCP_HDR_SIZE 20
191 #define QLA_TCP_TS_OPTION_SIZE 12
195 * Function: Handles coalesced (hardware LRO) frames received
/*
 * qla_lro_intr
 *	Process one hardware-LRO completion: rebuild the coalesced TCP
 *	segment's headers (TCP flags, IP/IPv6 total length), handle the
 *	in-line VLAN tag, mark checksums as hardware-verified, and input
 *	the super-frame to the stack. Returns nonzero on error (caller
 *	checks in ql_rcv_isr).
 *	NOTE(review): portions of this body are not visible in this view
 *	(original line numbering is discontinuous) — mbuf chaining and
 *	error-path statements are elided here.
 */
198 qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
201 struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
202 struct ifnet *ifp = ha->ifp;
204 struct ether_vlan_header *eh;
205 uint32_t i, rem_len = 0, pkt_length, iplen;
207 struct ip *ip = NULL;
208 struct ip6_hdr *ip6 = NULL;
211 qla_rx_ring_t *rx_ring;
/* map status ring to an rx ring; with a single rds ring r_idx stays 0 */
213 if (ha->hw.num_rds_rings > 1)
216 ha->hw.rds[r_idx].count++;
218 rx_ring = &ha->rx_ring[r_idx];
222 sdsp = &ha->hw.sds[sds_idx];
/* total length = L2..L4 headers (l4_offset) + coalesced payload ... */
224 pkt_length = sgc->payload_length + sgc->l4_offset;
/* ... plus the TCP header, with 12 more bytes if timestamps are present */
226 if (sgc->flags & Q8_LRO_COMP_TS) {
227 pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
229 pkt_length += QLA_TCP_HDR_SIZE;
231 ha->lro_bytes += pkt_length;
/* reclaim every rx buffer that contributed to this coalesced frame */
233 for (i = 0; i < sgc->num_handles; i++) {
234 rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
236 QL_ASSERT(ha, (rxb != NULL),
237 ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
240 if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
242 device_printf(ha->pci_dev,
243 "%s invalid rxb[%d, %d, 0x%04x]\n",
244 __func__, sds_idx, i, sgc->handle[i]);
253 QL_ASSERT(ha, (mp != NULL),
254 ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
/* make DMA'd data visible to the CPU before touching the mbuf */
257 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
/* return the buffer descriptor to the per-sds free list for replenish */
260 rxb->next = sdsp->rxb_free;
261 sdsp->rxb_free = rxb;
264 if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
266 device_printf(ha->pci_dev,
267 "%s mp == NULL [%d, %d, 0x%04x]\n",
268 __func__, sds_idx, i, sgc->handle[i]);
/* first segment carries the packet header */
275 mp->m_flags |= M_PKTHDR;
276 mp->m_pkthdr.len = pkt_length;
277 mp->m_pkthdr.rcvif = ifp;
278 rem_len = mp->m_pkthdr.len;
280 mp->m_flags &= ~M_PKTHDR;
283 rem_len = rem_len - mp->m_len;
287 mpl->m_len = rem_len;
/* locate the TCP header and restore the PUSH bit lost in coalescing */
289 th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);
291 if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
292 th->th_flags |= TH_PUSH;
/* trim any padding before the ethernet header */
294 m_adj(mpf, sgc->l2_offset);
296 eh = mtod(mpf, struct ether_vlan_header *);
/* in-line 802.1Q tag: record it, then strip the 4-byte encapsulation */
298 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
299 uint32_t *data = (uint32_t *)eh;
301 mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
302 mpf->m_flags |= M_VLANTAG;
304 *(data + 3) = *(data + 2);
305 *(data + 2) = *(data + 1);
308 m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
310 etype = ntohs(eh->evl_proto);
312 etype = ntohs(eh->evl_encap_proto);
/* rewrite the IP total length to cover the whole coalesced segment */
315 if (etype == ETHERTYPE_IP) {
316 ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
318 iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
321 ip->ip_len = htons(iplen);
325 M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);
327 } else if (etype == ETHERTYPE_IPV6) {
328 ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);
/* ip6_plen excludes the fixed IPv6 header: TCP header + payload only */
330 iplen = (th->th_off << 2) + sgc->payload_length;
332 ip6->ip6_plen = htons(iplen);
336 M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);
/* unknown ethertype path: still replenish before bailing out */
341 if (sdsp->rx_free > ha->std_replenish)
342 qla_replenish_normal_rx(ha, sdsp, r_idx);
/* hardware already verified checksums on LRO completions */
346 mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
347 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
348 mpf->m_pkthdr.csum_data = 0xFFFF;
350 mpf->m_pkthdr.flowid = sgc->rss_hash;
351 mpf->m_flags |= M_FLOWID;
/* hand the rebuilt super-frame directly to the stack */
355 (*ifp->if_input)(ifp, mpf);
357 if (sdsp->rx_free > ha->std_replenish)
358 qla_replenish_normal_rx(ha, sdsp, r_idx);
/*
 * qla_rcv_cont_sds
 *	Walk 'dcount' continuation status descriptors that follow a
 *	multi-descriptor SGL_RCV / SGL_LRO completion, extracting the 1-7
 *	rx buffer handles packed into each descriptor's two 64-bit words.
 *	Handles are appended through 'handle'; the running total is added
 *	to '*nhandles'. Returns nonzero on a malformed descriptor
 *	(opcode/handle-count sanity checks fail or fault injection fires).
 *	NOTE(review): some statements (error returns, case labels) are not
 *	visible in this view — original line numbering is discontinuous.
 */
364 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
365 uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
368 uint16_t num_handles;
369 q80_stat_desc_t *sdesc;
375 for (i = 0; i < dcount; i++) {
/* status ring is a power-of-two circle; advance with a mask */
376 comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
377 sdesc = (q80_stat_desc_t *)
378 &ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
/* a continuation descriptor must carry a valid (nonzero) opcode */
380 opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
383 device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
384 __func__, (void *)sdesc->data[0],
385 (void *)sdesc->data[1]);
389 num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
391 device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
392 __func__, (void *)sdesc->data[0],
393 (void *)sdesc->data[1]);
/* fault injection: force the invalid-handle-count error path */
397 if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
/* unpack 1..7 handles; 1-4 live in data[0], 5-7 in data[1] */
400 switch (num_handles) {
403 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
407 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
408 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
412 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
413 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
414 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
418 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
419 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
420 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
421 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
425 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
426 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
427 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
428 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
429 *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
433 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
434 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
435 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
436 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
437 *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
438 *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
442 *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
443 *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
444 *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
445 *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
446 *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
447 *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
448 *handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
/* anything outside 1..7 is a hardware/firmware inconsistency */
452 device_printf(ha->pci_dev,
453 "%s: invalid num handles %p %p\n",
454 __func__, (void *)sdesc->data[0],
455 (void *)sdesc->data[1]);
458 ("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
459 __func__, "invalid num handles", sds_idx, num_handles,
460 (void *)sdesc->data[0],(void *)sdesc->data[1]));
/* accumulate the caller's running handle count */
465 *nhandles = *nhandles + num_handles;
472 * Function: Main Interrupt Service Routine
/*
 * ql_rcv_isr
 *	Main receive interrupt service loop for status ring 'sds_idx'.
 *	Consumes up to 'count' status descriptors, dispatching each by
 *	opcode: single-buffer receive (RCV_PKT), multi-buffer scatter/gather
 *	receive (SGL_RCV), or hardware LRO (SGL_LRO). Processed descriptors
 *	are zeroed and the consumer index is pushed to hardware in batches;
 *	soft LRO is flushed and rx buffers replenished before returning.
 *	NOTE(review): many statements (error gotos, some case labels,
 *	returns) are not visible in this view — the original line numbering
 *	is discontinuous throughout this body.
 */
475 ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
479 uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
480 volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
484 uint32_t sds_replenish_threshold = 0;
/* mark this ring active so teardown can wait for us to drain */
491 hw->sds[sds_idx].rcv_active = 1;
492 if (ha->flags.stop_rcv) {
493 hw->sds[sds_idx].rcv_active = 0;
497 QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));
/* resume where the previous invocation left off */
502 comp_idx = hw->sds[sds_idx].sdsr_next;
504 while (count-- && !ha->flags.stop_rcv) {
506 sdesc = (q80_stat_desc_t *)
507 &hw->sds[sds_idx].sds_ring_base[comp_idx];
509 opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
514 hw->sds[sds_idx].intr_count++;
/* ---- simple one-buffer receive completion ---- */
517 case Q8_STAT_DESC_OPCODE_RCV_PKT:
521 bzero(&sgc, sizeof(qla_sgl_comp_t));
524 Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
525 sgc.rcv.num_handles = 1;
527 Q8_STAT_DESC_HANDLE((sdesc->data[0]));
528 sgc.rcv.chksum_status =
529 Q8_STAT_DESC_STATUS((sdesc->data[1]));
532 Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
534 if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
536 Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
538 qla_rx_intr(ha, &sgc.rcv, sds_idx);
/* ---- scatter/gather receive: handles continue in later descriptors ---- */
541 case Q8_STAT_DESC_OPCODE_SGL_RCV:
544 Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));
/* verify the last descriptor of the group has already landed */
546 if (desc_count > 1) {
547 c_idx = (comp_idx + desc_count -1) &
548 (NUM_STATUS_DESCRIPTORS-1);
549 sdesc0 = (q80_stat_desc_t *)
550 &hw->sds[sds_idx].sds_ring_base[c_idx];
552 if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
553 Q8_STAT_DESC_OPCODE_CONT) {
559 bzero(&sgc, sizeof(qla_sgl_comp_t));
562 Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
564 sgc.rcv.chksum_status =
565 Q8_STAT_DESC_STATUS((sdesc->data[1]));
568 Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
570 if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
572 Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
575 QL_ASSERT(ha, (desc_count <= 2) ,\
576 ("%s: [sds_idx, data0, data1]="\
577 "%d, %p, %p]\n", __func__, sds_idx,\
578 (void *)sdesc->data[0],\
579 (void *)sdesc->data[1]));
581 sgc.rcv.num_handles = 1;
583 Q8_STAT_DESC_HANDLE((sdesc->data[0]));
/* pull remaining handles out of the continuation descriptors */
585 if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
586 &sgc.rcv.handle[1], &nhandles)) {
588 "%s: [sds_idx, dcount, data0, data1]="
589 "[%d, %d, 0x%llx, 0x%llx]\n",
590 __func__, sds_idx, desc_count,
591 (long long unsigned int)sdesc->data[0],
592 (long long unsigned int)sdesc->data[1]);
597 sgc.rcv.num_handles += nhandles;
599 qla_rx_intr(ha, &sgc.rcv, sds_idx);
/* ---- hardware LRO completion: up to 7 continuation descriptors ---- */
603 case Q8_STAT_DESC_OPCODE_SGL_LRO:
606 Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));
608 if (desc_count > 1) {
609 c_idx = (comp_idx + desc_count -1) &
610 (NUM_STATUS_DESCRIPTORS-1);
611 sdesc0 = (q80_stat_desc_t *)
612 &hw->sds[sds_idx].sds_ring_base[c_idx];
614 if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
615 Q8_STAT_DESC_OPCODE_CONT) {
620 bzero(&sgc, sizeof(qla_sgl_comp_t));
622 sgc.lro.payload_length =
623 Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));
626 Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
628 sgc.lro.num_handles = 1;
630 Q8_STAT_DESC_HANDLE((sdesc->data[0]));
/* preserve TCP timestamp-option and PUSH-bit state for header rebuild */
632 if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
633 sgc.lro.flags |= Q8_LRO_COMP_TS;
635 if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
636 sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;
639 Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
641 Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));
643 if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
645 Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
648 QL_ASSERT(ha, (desc_count <= 7) ,\
649 ("%s: [sds_idx, data0, data1]="\
650 "[%d, 0x%llx, 0x%llx]\n",\
652 (long long unsigned int)sdesc->data[0],\
653 (long long unsigned int)sdesc->data[1]));
655 if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
656 desc_count, &sgc.lro.handle[1], &nhandles)) {
658 "%s: [sds_idx, data0, data1]="\
659 "[%d, 0x%llx, 0x%llx]\n",\
661 (long long unsigned int)sdesc->data[0],\
662 (long long unsigned int)sdesc->data[1]);
668 sgc.lro.num_handles += nhandles;
/* nonzero return means the LRO frame could not be processed: dump state */
670 if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
672 "%s: [sds_idx, data0, data1]="\
673 "[%d, 0x%llx, 0x%llx]\n",\
675 (long long unsigned int)sdesc->data[0],\
676 (long long unsigned int)sdesc->data[1]);
678 "%s: [comp_idx, c_idx, dcount, nhndls]="\
679 "[%d, %d, %d, %d]\n",\
680 __func__, comp_idx, c_idx, desc_count,
681 sgc.lro.num_handles);
682 if (desc_count > 1) {
684 "%s: [sds_idx, data0, data1]="\
685 "[%d, 0x%llx, 0x%llx]\n",\
687 (long long unsigned int)sdesc0->data[0],\
688 (long long unsigned int)sdesc0->data[1]);
/* unknown opcode: log it (descriptor is still consumed below) */
695 device_printf(dev, "%s: default 0x%llx!\n", __func__,
696 (long long unsigned int)sdesc->data[0]);
703 sds_replenish_threshold += desc_count;
/* zero each consumed descriptor so stale opcodes are never re-read */
706 while (desc_count--) {
707 sdesc->data[0] = 0ULL;
708 sdesc->data[1] = 0ULL;
709 comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
710 sdesc = (q80_stat_desc_t *)
711 &hw->sds[sds_idx].sds_ring_base[comp_idx];
/* push the consumer index to hardware in batches, not per-descriptor */
714 if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
715 sds_replenish_threshold = 0;
716 if (hw->sds[sds_idx].sdsr_next != comp_idx) {
717 QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
720 hw->sds[sds_idx].sdsr_next = comp_idx;
/* flush any mbufs queued into soft LRO during this pass */
724 if (ha->hw.enable_soft_lro) {
725 struct lro_ctrl *lro;
727 lro = &ha->hw.sds[sds_idx].lro;
729 #if (__FreeBSD_version >= 1100101)
731 tcp_lro_flush_all(lro);
/* pre-11.x LRO API: drain the active list entry by entry */
734 struct lro_entry *queued;
736 while ((!SLIST_EMPTY(&lro->lro_active))) {
737 queued = SLIST_FIRST(&lro->lro_active);
738 SLIST_REMOVE_HEAD(&lro->lro_active, next);
739 tcp_lro_flush(lro, queued);
742 #endif /* #if (__FreeBSD_version >= 1100101) */
746 if (ha->flags.stop_rcv)
747 goto ql_rcv_isr_exit;
/* final consumer-index update; equal indices mean a spurious interrupt */
749 if (hw->sds[sds_idx].sdsr_next != comp_idx) {
750 QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
751 hw->sds[sds_idx].sdsr_next = comp_idx;
753 hw->sds[sds_idx].spurious_intr_count++;
755 if (ha->hw.num_rds_rings > 1)
758 sdsp = &ha->hw.sds[sds_idx];
760 if (sdsp->rx_free > ha->std_replenish)
761 qla_replenish_normal_rx(ha, sdsp, r_idx);
/* peek at the next descriptor (NOTE(review): use of 'opcode' after this
 * point is not visible in this view — presumably signals more work) */
764 sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
765 opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
771 hw->sds[sds_idx].rcv_active = 0;
/*
 * ql_mbx_isr
 *	Mailbox/AEN interrupt handler. Reads the firmware mailbox control
 *	register to confirm an async event notification (AEN) is pending,
 *	decodes the event code from mailbox 0, and dispatches: link state
 *	change, IDC events, error dumps, SFP insertion/removal. Finishes by
 *	clearing the mailbox and unmasking the interrupt.
 *	NOTE(review): case labels and some statements are not visible in
 *	this view — the original line numbering is discontinuous.
 */
777 ql_mbx_isr(void *arg)
781 uint32_t prev_link_state;
786 device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
/* bits [1:0] == 01 means firmware posted a mailbox event */
790 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
791 if ((data & 0x3) != 0x1) {
792 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
/* AEN codes are 0x8xxx in mailbox 0 */
796 data = READ_REG32(ha, Q8_FW_MBOX0);
798 if ((data & 0xF000) != 0x8000)
801 data = data & 0xFFFF;
/* ---- 0x8001: link event AEN ---- */
805 case 0x8001: /* It's an AEN */
807 ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
809 data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
810 ha->hw.cable_length = data & 0xFFFF;
813 ha->hw.link_speed = data & 0xFFF;
815 data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
/* propagate link transitions to the network stack exactly once */
817 prev_link_state = ha->hw.link_up;
818 ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
820 if (prev_link_state != ha->hw.link_up) {
822 if_link_state_change(ha->ifp, LINK_STATE_UP);
824 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
828 ha->hw.module_type = ((data >> 8) & 0xFF);
829 ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
830 ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
832 data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
833 ha->hw.flags.loopback_mode = data & 0x03;
835 ha->hw.link_faults = (data >> 3) & 0xFF;
/* ---- 0x8101 (IDC request, per the stored code): stash mailboxes for
 * deferred processing — NOTE(review): the case label itself is elided */
845 ha->hw.aen_mb0 = 0x8101;
846 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
847 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
848 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
849 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
853 /* for now just dump the registers */
857 ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
858 ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
859 ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
860 ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
861 ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
863 device_printf(ha->pci_dev, "%s: "
864 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
865 __func__, data, ombx[0], ombx[1], ombx[2],
872 /* sfp insertion aen */
873 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
874 __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
878 /* sfp removal aen */
879 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
886 ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
887 ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
888 ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
890 device_printf(ha->pci_dev, "%s: "
891 "0x%08x 0x%08x 0x%08x 0x%08x \n",
892 __func__, data, ombx[0], ombx[1], ombx[2]);
897 device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
/* ack the mailbox and re-enable the mailbox interrupt */
900 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
901 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
/*
 * qla_replenish_normal_rx
 *	Refill receive ring 'r_idx' from the per-sds free-buffer list:
 *	take each reclaimed rx buffer, attach a fresh mbuf via
 *	ql_get_mbuf(), post it into the hardware receive descriptor ring,
 *	and periodically push the producer index so the NIC can use the
 *	new buffers without waiting for the whole batch.
 *	NOTE(review): the loop header and some decrements/returns are not
 *	visible in this view — original line numbering is discontinuous.
 */
907 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
910 int count = sdsp->rx_free;
914 /* we can play with this value via a sysctl */
915 uint32_t replenish_thresh = ha->hw.rds_pidx_thres;
917 rdesc = &ha->hw.rds[r_idx];
/* remember where we started so we only ring the doorbell if we advanced */
919 rx_next = rdesc->rx_next;
/* pop buffers off the free list built by qla_rx_intr/qla_lro_intr */
922 rxb = sdsp->rxb_free;
927 sdsp->rxb_free = rxb->next;
/* attach a new mbuf + DMA map; on success post the descriptor */
930 if (ql_get_mbuf(ha, rxb, NULL) == 0) {
931 qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
933 rxb->paddr, (rxb->m_head)->m_pkthdr.len);
/* wrap both ring indices at the descriptor count */
935 if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
938 if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
941 device_printf(ha->pci_dev,
942 "%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
943 __func__, r_idx, rdesc->rx_in, rxb->handle);
/* mbuf allocation failed: return the buffer to the free list */
946 rxb->next = sdsp->rxb_free;
947 sdsp->rxb_free = rxb;
/* every rds_pidx_thres buffers, tell hardware about the new producer idx */
952 if (replenish_thresh-- == 0) {
953 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
955 rx_next = rdesc->rx_next;
956 replenish_thresh = ha->hw.rds_pidx_thres;
/* flush any remaining progress to hardware */
960 if (rx_next != rdesc->rx_next) {
961 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
969 qla_ivec_t *ivec = arg;
980 if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
984 fp = &ha->tx_fp[idx];
986 if ((fp->fp_taskqueue != NULL) &&
987 (ifp->if_drv_flags & IFF_DRV_RUNNING))
988 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);