2 * Copyright (c) 2013-2016 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
45 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
49 qla_rcv_error(qla_host_t *ha)
/*
 * Flag the adapter as needing recovery; a separate recovery path
 * presumably polls qla_initiate_recovery — TODO confirm against
 * the rest of the driver (not visible in this chunk).
 */
52 	ha->qla_initiate_recovery = 1;
58 * Function: Handles normal ethernet frames received
/*
 * qla_rx_intr: process one non-LRO receive completion (sgc) from status
 * ring sds_idx.  Walks the completion's buffer handles, chains the
 * buffers' mbufs into a single packet, fixes up VLAN/checksum/RSS
 * metadata, and hands the packet to the stack (directly or via soft LRO).
 * Consumed rx buffers are pushed onto sdsp->rxb_free for later replenish.
 */
61 qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
64 	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
65 	struct ifnet *ifp = ha->ifp;
67 	struct ether_vlan_header *eh;
68 	uint32_t i, rem_len = 0;
70 	qla_rx_ring_t *rx_ring;
73 	lro = &ha->hw.sds[sds_idx].lro;
	/* With multiple RDS rings, r_idx is derived per-SDS; else it stays 0.
	 * (The assignment is on a line elided from this view — confirm.) */
75 	if (ha->hw.num_rds_rings > 1)
78 	ha->hw.rds[r_idx].count++;
80 	sdsp = &ha->hw.sds[sds_idx];
81 	rx_ring = &ha->rx_ring[r_idx];
	/* One iteration per hardware buffer handle in this completion. */
83 	for (i = 0; i < sgc->num_handles; i++) {
		/* Handles index rx_buf; the 0x7FFF mask bounds the index. */
84 		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
86 		QL_ASSERT(ha, (rxb != NULL),
87 			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
		/* Bad handle (or injected fault): log and bail to recovery. */
90 		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
92 			device_printf(ha->pci_dev,
93 				"%s invalid rxb[%d, %d, 0x%04x]\n",
94 				__func__, sds_idx, i, sgc->handle[i]);
103 		QL_ASSERT(ha, (mp != NULL),
104 			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
107 		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
		/* Return the rx buffer to the free list for replenishment. */
110 		rxb->next = sdsp->rxb_free;
111 		sdsp->rxb_free = rxb;
114 		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
116 			device_printf(ha->pci_dev,
117 				"%s mp == NULL [%d, %d, 0x%04x]\n",
118 				__func__, sds_idx, i, sgc->handle[i]);
		/* First segment becomes the packet header mbuf (mpf);
		 * rem_len tracks payload still expected in later segments. */
125 			mp->m_flags |= M_PKTHDR;
126 			mp->m_pkthdr.len = sgc->pkt_length;
127 			mp->m_pkthdr.rcvif = ifp;
128 			rem_len = mp->m_pkthdr.len;
130 			mp->m_flags &= ~M_PKTHDR;
133 		rem_len = rem_len - mp->m_len;
	/* Trim the last segment to the residual length. */
137 	mpl->m_len = rem_len;
139 	eh = mtod(mpf, struct ether_vlan_header *);
	/* Hardware leaves the 802.1Q tag in-band: record it in the pkthdr,
	 * then shift the MAC addresses up 4 bytes and strip the tag. */
141 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
142 		uint32_t *data = (uint32_t *)eh;
144 		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
145 		mpf->m_flags |= M_VLANTAG;
147 		*(data + 3) = *(data + 2);
148 		*(data + 2) = *(data + 1);
151 		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	/* Propagate hardware checksum verdict to the stack. */
154 	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
155 		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
156 			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
157 		mpf->m_pkthdr.csum_data = 0xFFFF;
159 		mpf->m_pkthdr.csum_flags = 0;
162 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	/* RSS hash from the completion descriptor; hash type depends on
	 * the FreeBSD version's M_HASHTYPE API. */
164 	mpf->m_pkthdr.flowid = sgc->rss_hash;
166 #if __FreeBSD_version >= 1100000
167 	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
169 #if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
170 	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
172 	M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
174 #endif /* #if __FreeBSD_version >= 1100000 */
	/* Soft LRO if enabled; tcp_lro_rx() returning nonzero means the
	 * packet was not coalesced, so input it directly. */
176 	if (ha->hw.enable_soft_lro) {
178 #if (__FreeBSD_version >= 1100101)
180 		tcp_lro_queue_mbuf(lro, mpf);
183 		if (tcp_lro_rx(lro, mpf, 0))
184 			(*ifp->if_input)(ifp, mpf);
186 #endif /* #if (__FreeBSD_version >= 1100101) */
190 		(*ifp->if_input)(ifp, mpf);
	/* Refill the receive ring when enough buffers have been consumed. */
193 	if (sdsp->rx_free > ha->std_replenish)
194 		qla_replenish_normal_rx(ha, sdsp, r_idx);
/* Base TCP header and timestamp-option sizes used to reconstruct the
 * total packet length of a hardware-coalesced (LRO) completion. */
199 #define QLA_TCP_HDR_SIZE 20
200 #define QLA_TCP_TS_OPTION_SIZE 12
204 * Function: Handles hardware LRO (coalesced TCP) receive completions
/*
 * qla_lro_intr: process one hardware-LRO completion (sgc) from status
 * ring sds_idx.  Rebuilds the coalesced packet's mbuf chain, restores a
 * plausible TCP/IP header (length fields, PUSH bit), fixes VLAN/RSS/
 * checksum metadata, and inputs the packet to the stack.
 */
207 qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
210 	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
211 	struct ifnet *ifp = ha->ifp;
213 	struct ether_vlan_header *eh;
214 	uint32_t i, rem_len = 0, pkt_length, iplen;
216 	struct ip *ip = NULL;
217 	struct ip6_hdr *ip6 = NULL;
220 	qla_rx_ring_t *rx_ring;
222 	if (ha->hw.num_rds_rings > 1)
225 	ha->hw.rds[r_idx].count++;
227 	rx_ring = &ha->rx_ring[r_idx];
229 	ha->hw.rds[r_idx].lro_pkt_count++;
231 	sdsp = &ha->hw.sds[sds_idx];
	/* Total length = payload + L2..L4 headers; the TCP header size is
	 * not reported by hardware, so it is reconstructed here (base 20
	 * bytes, +12 if the completion carried a timestamp option). */
233 	pkt_length = sgc->payload_length + sgc->l4_offset;
235 	if (sgc->flags & Q8_LRO_COMP_TS) {
236 		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
238 		pkt_length += QLA_TCP_HDR_SIZE;
240 	ha->hw.rds[r_idx].lro_bytes += pkt_length;
	/* Chain each buffer handle's mbuf into the packet, returning the
	 * rx buffers to the free list as they are consumed. */
242 	for (i = 0; i < sgc->num_handles; i++) {
243 		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
245 		QL_ASSERT(ha, (rxb != NULL),
246 			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
249 		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
251 			device_printf(ha->pci_dev,
252 				"%s invalid rxb[%d, %d, 0x%04x]\n",
253 				__func__, sds_idx, i, sgc->handle[i]);
262 		QL_ASSERT(ha, (mp != NULL),
263 			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
266 		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
269 		rxb->next = sdsp->rxb_free;
270 		sdsp->rxb_free = rxb;
273 		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
275 			device_printf(ha->pci_dev,
276 				"%s mp == NULL [%d, %d, 0x%04x]\n",
277 				__func__, sds_idx, i, sgc->handle[i]);
284 			mp->m_flags |= M_PKTHDR;
285 			mp->m_pkthdr.len = pkt_length;
286 			mp->m_pkthdr.rcvif = ifp;
287 			rem_len = mp->m_pkthdr.len;
289 			mp->m_flags &= ~M_PKTHDR;
292 		rem_len = rem_len - mp->m_len;
296 	mpl->m_len = rem_len;
	/* Restore the PUSH flag the hardware stripped while coalescing. */
298 	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);
300 	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
301 		th->th_flags |= TH_PUSH;
	/* Drop any pre-L2 padding reported by the completion. */
303 	m_adj(mpf, sgc->l2_offset);
305 	eh = mtod(mpf, struct ether_vlan_header *);
	/* Same in-band VLAN tag handling as qla_rx_intr: record the tag,
	 * shift the MAC header up, strip the encapsulation. */
307 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
308 		uint32_t *data = (uint32_t *)eh;
310 		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
311 		mpf->m_flags |= M_VLANTAG;
313 		*(data + 3) = *(data + 2);
314 		*(data + 2) = *(data + 1);
317 		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
319 		etype = ntohs(eh->evl_proto);
321 		etype = ntohs(eh->evl_encap_proto);
	/* Rewrite the IP total-length / payload-length field to match the
	 * coalesced size; set the RSS hash type accordingly. */
324 	if (etype == ETHERTYPE_IP) {
325 		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
327 		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
330 		ip->ip_len = htons(iplen);
334 		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);
336 	} else if (etype == ETHERTYPE_IPV6) {
337 		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);
339 		iplen = (th->th_off << 2) + sgc->payload_length;
341 		ip6->ip6_plen = htons(iplen);
345 		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);
		/* Unknown ethertype: replenish and drop (error path lines
		 * elided from this view — TODO confirm). */
350 		if (sdsp->rx_free > ha->std_replenish)
351 			qla_replenish_normal_rx(ha, sdsp, r_idx);
	/* Hardware validated checksums on the coalesced segments. */
355 	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
356 		CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
357 	mpf->m_pkthdr.csum_data = 0xFFFF;
359 	mpf->m_pkthdr.flowid = sgc->rss_hash;
361 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
363 	(*ifp->if_input)(ifp, mpf);
365 	if (sdsp->rx_free > ha->std_replenish)
366 		qla_replenish_normal_rx(ha, sdsp, r_idx);
/*
 * qla_rcv_cont_sds: walk `dcount` continuation status descriptors that
 * follow a multi-descriptor SGL completion at comp_idx, extracting up to
 * 7 buffer handles from each into `handle[]`.  The running total is
 * accumulated into *nhandles.  Returns nonzero on a malformed descriptor
 * (return statements are on lines elided from this view — confirm).
 */
372 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
373 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
376 	uint16_t num_handles;
377 	q80_stat_desc_t *sdesc;
383 	for (i = 0; i < dcount; i++) {
		/* Status ring is a power-of-two circular buffer. */
384 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
385 		sdesc = (q80_stat_desc_t *)
386 			&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
		/* Continuation descriptors must carry opcode 0. */
388 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
391 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
392 				__func__, (void *)sdesc->data[0],
393 				(void *)sdesc->data[1]);
397 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
		/* NOTE(review): message text says "opcode=0" but this branch
		 * appears to report an invalid handle count — the guarding
		 * condition is elided from this view; confirm before fixing. */
399 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
400 				__func__, (void *)sdesc->data[0],
401 				(void *)sdesc->data[1]);
405 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
		/* Handles 1-4 live in data[0], 5-7 in data[1]; unpack exactly
		 * num_handles of them, in order. */
408 		switch (num_handles) {
411 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
415 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
416 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
420 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
421 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
422 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
427 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
428 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
429 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
433 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
434 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
435 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
436 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
437 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
441 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
442 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
443 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
444 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
445 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
446 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
450 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
451 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
452 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
453 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
454 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
455 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
456 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
460 			device_printf(ha->pci_dev,
461 				"%s: invalid num handles %p %p\n",
462 				__func__, (void *)sdesc->data[0],
463 				(void *)sdesc->data[1]);
466 				("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
467 				__func__, "invalid num handles", sds_idx, num_handles,
468 				(void *)sdesc->data[0],(void *)sdesc->data[1]));
473 	*nhandles = *nhandles + num_handles;
480 * Function: Main Interrupt Service Routine
/*
 * ql_rcv_isr: drain up to `count` completions from status ring sds_idx.
 * Each descriptor's opcode selects single-buffer receive, scatter-gather
 * receive, or hardware-LRO handling; multi-descriptor completions pull
 * extra buffer handles from continuation descriptors via
 * qla_rcv_cont_sds().  Processed descriptors are zeroed and the consumer
 * index is advanced (batched by sds_cidx_thres).  Soft LRO is flushed at
 * the end and the rx ring replenished if needed.
 */
483 ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
487 	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
488 	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
492 	uint32_t sds_replenish_threshold = 0;
	/* Mark this ring as actively being serviced for the duration. */
499 	hw->sds[sds_idx].rcv_active = 1;
501 		hw->sds[sds_idx].rcv_active = 0;
505 	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));
510 	comp_idx = hw->sds[sds_idx].sdsr_next;
512 	while (count-- && !ha->stop_rcv) {
514 		sdesc = (q80_stat_desc_t *)
515 			&hw->sds[sds_idx].sds_ring_base[comp_idx];
517 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
		/* --- single-buffer receive completion --- */
524 		case Q8_STAT_DESC_OPCODE_RCV_PKT:
528 			bzero(&sgc, sizeof(qla_sgl_comp_t));
531 				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
532 			sgc.rcv.num_handles = 1;
534 				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
535 			sgc.rcv.chksum_status =
536 				Q8_STAT_DESC_STATUS((sdesc->data[1]));
539 				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
541 			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
543 					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
545 			qla_rx_intr(ha, &sgc.rcv, sds_idx);
		/* --- scatter-gather receive (may span several descriptors) --- */
548 		case Q8_STAT_DESC_OPCODE_SGL_RCV:
551 				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));
			/* Multi-descriptor: the last descriptor of the group
			 * must be a CONT descriptor, else the ring is corrupt. */
553 			if (desc_count > 1) {
554 				c_idx = (comp_idx + desc_count -1) &
555 					(NUM_STATUS_DESCRIPTORS-1);
556 				sdesc0 = (q80_stat_desc_t *)
557 					&hw->sds[sds_idx].sds_ring_base[c_idx];
559 				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
560 						Q8_STAT_DESC_OPCODE_CONT) {
566 			bzero(&sgc, sizeof(qla_sgl_comp_t));
569 				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
571 			sgc.rcv.chksum_status =
572 				Q8_STAT_DESC_STATUS((sdesc->data[1]));
575 				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
577 			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
579 					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
582 			QL_ASSERT(ha, (desc_count <= 2) ,\
583 				("%s: [sds_idx, data0, data1]="\
584 				"%d, %p, %p]\n", __func__, sds_idx,\
585 				(void *)sdesc->data[0],\
586 				(void *)sdesc->data[1]));
588 			sgc.rcv.num_handles = 1;
590 				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			/* Remaining handles come from continuation descriptors. */
592 			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
593 				&sgc.rcv.handle[1], &nhandles)) {
595 					"%s: [sds_idx, dcount, data0, data1]="
596 					"[%d, %d, 0x%llx, 0x%llx]\n",
597 					__func__, sds_idx, desc_count,
598 					(long long unsigned int)sdesc->data[0],
599 					(long long unsigned int)sdesc->data[1]);
604 			sgc.rcv.num_handles += nhandles;
606 			qla_rx_intr(ha, &sgc.rcv, sds_idx);
		/* --- hardware LRO completion (up to 7 extra descriptors) --- */
610 		case Q8_STAT_DESC_OPCODE_SGL_LRO:
613 				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));
615 			if (desc_count > 1) {
616 				c_idx = (comp_idx + desc_count -1) &
617 					(NUM_STATUS_DESCRIPTORS-1);
618 				sdesc0 = (q80_stat_desc_t *)
619 					&hw->sds[sds_idx].sds_ring_base[c_idx];
621 				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
622 						Q8_STAT_DESC_OPCODE_CONT) {
627 			bzero(&sgc, sizeof(qla_sgl_comp_t));
629 			sgc.lro.payload_length =
630 			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));
633 				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
635 			sgc.lro.num_handles = 1;
637 				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
639 			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
640 				sgc.lro.flags |= Q8_LRO_COMP_TS;
642 			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
643 				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;
646 				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
648 				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));
650 			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
652 					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
655 			QL_ASSERT(ha, (desc_count <= 7) ,\
656 				("%s: [sds_idx, data0, data1]="\
657 				"[%d, 0x%llx, 0x%llx]\n",\
659 				(long long unsigned int)sdesc->data[0],\
660 				(long long unsigned int)sdesc->data[1]));
662 			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
663 				desc_count, &sgc.lro.handle[1], &nhandles)) {
665 					"%s: [sds_idx, data0, data1]="\
666 					"[%d, 0x%llx, 0x%llx]\n",\
668 					(long long unsigned int)sdesc->data[0],\
669 					(long long unsigned int)sdesc->data[1]);
675 			sgc.lro.num_handles += nhandles;
677 			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
679 					"%s: [sds_idx, data0, data1]="\
680 					"[%d, 0x%llx, 0x%llx]\n",\
682 					(long long unsigned int)sdesc->data[0],\
683 					(long long unsigned int)sdesc->data[1]);
685 					"%s: [comp_idx, c_idx, dcount, nhndls]="\
686 					"[%d, %d, %d, %d]\n",\
687 					__func__, comp_idx, c_idx, desc_count,
688 					sgc.lro.num_handles);
689 				if (desc_count > 1) {
691 					"%s: [sds_idx, data0, data1]="\
692 					"[%d, 0x%llx, 0x%llx]\n",\
694 					(long long unsigned int)sdesc0->data[0],\
695 					(long long unsigned int)sdesc0->data[1]);
702 			device_printf(dev, "%s: default 0x%llx!\n", __func__,
703 				(long long unsigned int)sdesc->data[0]);
710 		sds_replenish_threshold += desc_count;
		/* Zero every descriptor of the completion group so stale data
		 * is never re-parsed, advancing comp_idx past the group. */
713 		while (desc_count--) {
714 			sdesc->data[0] = 0ULL;
715 			sdesc->data[1] = 0ULL;
716 			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
717 			sdesc = (q80_stat_desc_t *)
718 				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		/* Batch consumer-index updates to limit register writes. */
721 		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
722 			sds_replenish_threshold = 0;
723 			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
724 				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
727 			hw->sds[sds_idx].sdsr_next = comp_idx;
	/* Flush any packets still held by soft LRO before returning. */
731 	if (ha->hw.enable_soft_lro) {
732 		struct lro_ctrl *lro;
734 		lro = &ha->hw.sds[sds_idx].lro;
736 #if (__FreeBSD_version >= 1100101)
738 		tcp_lro_flush_all(lro);
741 		struct lro_entry *queued;
743 		while ((!SLIST_EMPTY(&lro->lro_active))) {
744 			queued = SLIST_FIRST(&lro->lro_active);
745 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
746 			tcp_lro_flush(lro, queued);
749 #endif /* #if (__FreeBSD_version >= 1100101) */
754 		goto ql_rcv_isr_exit;
756 	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
757 		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
758 		hw->sds[sds_idx].sdsr_next = comp_idx;
760 	if (ha->hw.num_rds_rings > 1)
763 	sdsp = &ha->hw.sds[sds_idx];
765 	if (sdsp->rx_free > ha->std_replenish)
766 		qla_replenish_normal_rx(ha, sdsp, r_idx);
	/* Peek at the next descriptor — presumably to report remaining
	 * work to the caller; return path elided from this view. */
769 	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
770 	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
776 	hw->sds[sds_idx].rcv_active = 0;
/*
 * ql_mbx_isr: mailbox/AEN interrupt handler.  Validates the mailbox
 * control register, reads the event code from Q8_FW_MBOX0, and decodes
 * firmware asynchronous events (link change, cable, SFP, IDC, errors).
 * Always acknowledges the mailbox and re-enables the interrupt mask on
 * exit.
 */
782 ql_mbx_isr(void *arg)
786 	uint32_t prev_link_state;
791 		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
	/* Low 2 bits of the control register must be 01 for a valid
	 * firmware-owned mailbox; otherwise just unmask and return. */
795 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
796 	if ((data & 0x3) != 0x1) {
797 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
801 	data = READ_REG32(ha, Q8_FW_MBOX0);
	/* AEN codes carry 0x8 in the top nibble of the low 16 bits. */
803 	if ((data & 0xF000) != 0x8000)
806 	data = data & 0xFFFF;
810 	case 0x8001:  /* It's an AEN */
		/* Link-status AEN: cable info, speed, link state, duplex,
		 * autoneg, loopback mode and fault count from MBOX1..4. */
812 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
814 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
815 		ha->hw.cable_length = data & 0xFFFF;
818 		ha->hw.link_speed = data & 0xFFF;
820 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
822 		prev_link_state =  ha->hw.link_up;
823 		ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
		/* Notify the stack only on an actual state transition. */
825 		if (prev_link_state !=  ha->hw.link_up) {
827 				if_link_state_change(ha->ifp, LINK_STATE_UP);
829 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
833 		ha->hw.module_type = ((data >> 8) & 0xFF);
834 		ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
835 		ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
837 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
838 		ha->hw.flags.loopback_mode = data & 0x03;
840 		ha->hw.link_faults = (data >> 3) & 0xFF;
		/* IDC-request AEN (0x8101, per the stored code): latch the
		 * mailbox payload for deferred handling. */
850 		ha->hw.aen_mb0 = 0x8101;
851 		ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
852 		ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
853 		ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
854 		ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
858 		/* for now just dump the registers */
862 		ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
863 		ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
864 		ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
865 		ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
866 		ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
868 		device_printf(ha->pci_dev, "%s: "
869 			"0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
870 			__func__, data, ombx[0], ombx[1], ombx[2],
877 		/* sfp insertion aen */
878 		device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
879 			__func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
883 		/* sfp removal aen */
884 		device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
		/* Unrecognized-event dump: first three out-mailboxes. */
891 		ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
892 		ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
893 		ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
895 		device_printf(ha->pci_dev, "%s: "
896 			"0x%08x 0x%08x 0x%08x 0x%08x \n",
897 			__func__, data, ombx[0], ombx[1], ombx[2]);
902 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
	/* Acknowledge the mailbox and re-enable the interrupt. */
905 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
906 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
/*
 * qla_replenish_normal_rx: refill receive ring r_idx from the per-SDS
 * free-buffer list (sdsp->rxb_free).  Each buffer gets a fresh mbuf via
 * ql_get_mbuf() and is written back into the hardware ring; the producer
 * index is published every rds_pidx_thres buffers and once more at the
 * end if anything was posted.  On mbuf-allocation failure the buffer is
 * returned to the free list and the loop presumably stops — the break is
 * on a line elided from this view.
 */
912 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
915 	int count = sdsp->rx_free;
919 	/* we can play with this value via a sysctl */
920 	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;
922 	rdesc = &ha->hw.rds[r_idx];
924 	rx_next = rdesc->rx_next;
927 		rxb = sdsp->rxb_free;
		/* Pop the head of the free list before reusing the buffer. */
932 		sdsp->rxb_free = rxb->next;
935 		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
936 			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
938 				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			/* rx_in / rx_next wrap at ring size (increment lines
			 * elided from this view). */
940 			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
943 			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
946 			device_printf(ha->pci_dev,
947 				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
948 				__func__, r_idx, rdesc->rx_in, rxb->handle);
			/* Allocation failed: push the buffer back for retry. */
951 			rxb->next = sdsp->rxb_free;
952 			sdsp->rxb_free = rxb;
		/* Publish the producer index in batches of rds_pidx_thres. */
957 		if (replenish_thresh-- == 0) {
958 			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
960 			rx_next = rdesc->rx_next;
961 			replenish_thresh = ha->hw.rds_pidx_thres;
	/* Final producer-index update for any leftover posted buffers. */
965 	if (rx_next != rdesc->rx_next) {
966 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
974 qla_ivec_t *ivec = arg;
985 if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
988 fp = &ha->tx_fp[idx];
989 hw->sds[idx].intr_count++;
991 if ((fp->fp_taskqueue != NULL) &&
992 (ifp->if_drv_flags & IFF_DRV_RUNNING))
993 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);