2 * Copyright (c) 2013-2016 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
45 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
/*
 * qla_rcv_error
 *	Mark the receive path as failed: stop_rcv makes the RX processing
 *	loops bail out, and qla_initiate_recovery requests that the driver's
 *	recovery logic reinitialize the adapter.
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
49 qla_rcv_error(qla_host_t *ha)
51 	ha->flags.stop_rcv = 1;
52 	ha->qla_initiate_recovery = 1;
58 * Function: Handles normal ethernet frames received
/*
 * qla_rx_intr
 *	Build an mbuf chain for one received frame from the scatter-gather
 *	completion (sgc), strip any in-band 802.1Q encapsulation, fill in
 *	checksum/RSS metadata, and hand the packet to the stack via if_input.
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
61 qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
64 	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
65 	struct ifnet *ifp = ha->ifp;
67 	struct ether_vlan_header *eh;
68 	uint32_t i, rem_len = 0;
70 	qla_rx_ring_t *rx_ring;
	/* Multiple RDS rings: pick the ring for this status ring (r_idx
	 * presumably derived from sds_idx — derivation elided here). */
72 	if (ha->hw.num_rds_rings > 1)
75 	ha->hw.rds[r_idx].count++;
77 	sdsp = &ha->hw.sds[sds_idx];
78 	rx_ring = &ha->rx_ring[r_idx];
	/* Walk every hardware buffer handle that makes up this frame. */
80 	for (i = 0; i < sgc->num_handles; i++) {
		/* Handles index rx_buf[]; low 15 bits select the slot. */
81 		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
83 		QL_ASSERT(ha, (rxb != NULL),
84 			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
		/* Invalid buffer (or injected fault): log and, presumably,
		 * enter the error path — TODO confirm (body elided). */
87 		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
89 			device_printf(ha->pci_dev,
90 				"%s invalid rxb[%d, %d, 0x%04x]\n",
91 				__func__, sds_idx, i, sgc->handle[i]);
100 		QL_ASSERT(ha, (mp != NULL),
101 			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
		/* Make the DMA'd data visible to the CPU before touching it. */
104 		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
		/* Return the rx buffer descriptor to the per-SDS free list
		 * so the replenish path can rearm it with a fresh mbuf. */
107 		rxb->next = sdsp->rxb_free;
108 		sdsp->rxb_free = rxb;
111 		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
113 			device_printf(ha->pci_dev,
114 				"%s mp == NULL [%d, %d, 0x%04x]\n",
115 				__func__, sds_idx, i, sgc->handle[i]);
		/* First segment: becomes the packet header mbuf carrying the
		 * total frame length reported by the completion. */
122 			mp->m_flags |= M_PKTHDR;
123 			mp->m_pkthdr.len = sgc->pkt_length;
124 			mp->m_pkthdr.rcvif = ifp;
125 			rem_len = mp->m_pkthdr.len;
		/* Continuation segments must not carry M_PKTHDR. */
127 			mp->m_flags &= ~M_PKTHDR;
		/* Track how many bytes of the frame are still unaccounted. */
130 		rem_len = rem_len - mp->m_len;
	/* Trim the final mbuf so the chain's total equals pkt_length. */
134 	mpl->m_len = rem_len;
136 	eh = mtod(mpf, struct ether_vlan_header *);
	/*
	 * Hardware leaves the 802.1Q tag in the frame.  Record the tag in
	 * the pkthdr, then collapse the Ethernet header over the 4-byte
	 * encapsulation (two 32-bit word moves shown; the elided lines
	 * presumably move the remaining words) and m_adj() it off.
	 */
138 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
139 		uint32_t *data = (uint32_t *)eh;
141 		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
142 		mpf->m_flags |= M_VLANTAG;
144 		*(data + 3) = *(data + 2);
145 		*(data + 2) = *(data + 1);
148 		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	/* Hardware verified IP and L4 checksums: tell the stack so it
	 * skips software checksumming; otherwise leave flags clear. */
151 	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
152 		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
153 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
154 		mpf->m_pkthdr.csum_data = 0xFFFF;
156 		mpf->m_pkthdr.csum_flags = 0;
159 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	/* Propagate the device RSS hash; type is opaque to the stack. */
161 	mpf->m_pkthdr.flowid = sgc->rss_hash;
162 	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
164 	(*ifp->if_input)(ifp, mpf);
	/* Rearm RX buffers once enough have accumulated on the free list. */
166 	if (sdsp->rx_free > ha->std_replenish)
167 		qla_replenish_normal_rx(ha, sdsp, r_idx);
172 #define QLA_TCP_HDR_SIZE 20
173 #define QLA_TCP_TS_OPTION_SIZE 12
177 * Function: Handles LRO (large-receive-offload) completions from the hardware
/*
 * qla_lro_intr
 *	Build an mbuf chain for a hardware-coalesced TCP aggregate, fix up
 *	the TCP/IP headers (length fields, PUSH bit), set checksum/RSS
 *	metadata, and hand the packet to the stack via if_input.
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
180 qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
183 	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
184 	struct ifnet *ifp = ha->ifp;
186 	struct ether_vlan_header *eh;
187 	uint32_t i, rem_len = 0, pkt_length, iplen;
189 	struct ip *ip = NULL;
190 	struct ip6_hdr *ip6 = NULL;
193 	qla_rx_ring_t *rx_ring;
195 	if (ha->hw.num_rds_rings > 1)
198 	ha->hw.rds[r_idx].count++;
200 	rx_ring = &ha->rx_ring[r_idx];
204 	sdsp = &ha->hw.sds[sds_idx];
	/*
	 * Completion reports payload bytes + L4 offset only; add back the
	 * TCP header (and 12-byte timestamp option when Q8_LRO_COMP_TS is
	 * set) to get the full packet length.
	 */
206 	pkt_length = sgc->payload_length + sgc->l4_offset;
208 	if (sgc->flags & Q8_LRO_COMP_TS) {
209 		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
211 		pkt_length += QLA_TCP_HDR_SIZE;
213 	ha->lro_bytes += pkt_length;
	/* Gather each hardware buffer handle into the mbuf chain. */
215 	for (i = 0; i < sgc->num_handles; i++) {
216 		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
218 		QL_ASSERT(ha, (rxb != NULL),
219 			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
222 		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
224 			device_printf(ha->pci_dev,
225 				"%s invalid rxb[%d, %d, 0x%04x]\n",
226 				__func__, sds_idx, i, sgc->handle[i]);
235 		QL_ASSERT(ha, (mp != NULL),
236 			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
		/* Make DMA'd data CPU-visible before header fix-ups below. */
239 		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
		/* Recycle the buffer descriptor onto the per-SDS free list. */
242 		rxb->next = sdsp->rxb_free;
243 		sdsp->rxb_free = rxb;
246 		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
248 			device_printf(ha->pci_dev,
249 				"%s mp == NULL [%d, %d, 0x%04x]\n",
250 				__func__, sds_idx, i, sgc->handle[i]);
		/* First segment becomes the pkthdr mbuf with the computed
		 * aggregate length; continuation mbufs drop M_PKTHDR. */
257 			mp->m_flags |= M_PKTHDR;
258 			mp->m_pkthdr.len = pkt_length;
259 			mp->m_pkthdr.rcvif = ifp;
260 			rem_len = mp->m_pkthdr.len;
262 			mp->m_flags &= ~M_PKTHDR;
265 		rem_len = rem_len - mp->m_len;
	/* Trim final mbuf so the chain totals pkt_length exactly. */
269 	mpl->m_len = rem_len;
	/* Locate the TCP header to restore flags the hardware stripped. */
271 	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);
273 	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
274 		th->th_flags |= TH_PUSH;
	/* Drop any pre-L2 padding the hardware prepended. */
276 	m_adj(mpf, sgc->l2_offset);
278 	eh = mtod(mpf, struct ether_vlan_header *);
	/* Same in-place VLAN de-encapsulation as qla_rx_intr: save tag,
	 * slide header words over the 4-byte encap, m_adj() it off. */
280 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
281 		uint32_t *data = (uint32_t *)eh;
283 		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
284 		mpf->m_flags |= M_VLANTAG;
286 		*(data + 3) = *(data + 2);
287 		*(data + 2) = *(data + 1);
290 		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
		/* Tagged frame: real ethertype is in evl_proto. */
292 		etype = ntohs(eh->evl_proto);
294 		etype = ntohs(eh->evl_encap_proto);
	/*
	 * The coalesced packet's IP length fields describe one original
	 * segment; rewrite them to cover the whole aggregate.
	 */
297 	if (etype == ETHERTYPE_IP) {
298 		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
300 		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
303 		ip->ip_len = htons(iplen);
307 		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);
309 	} else if (etype == ETHERTYPE_IPV6) {
310 		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);
		/* ip6_plen excludes the IPv6 header itself. */
312 		iplen = (th->th_off << 2) + sgc->payload_length;
314 		ip6->ip6_plen = htons(iplen);
318 		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);
	/* Unknown ethertype path (elided): replenish and bail out. */
323 		if (sdsp->rx_free > ha->std_replenish)
324 			qla_replenish_normal_rx(ha, sdsp, r_idx);
	/* LRO aggregates are hardware-verified: mark checksums good. */
328 	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
329 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
330 	mpf->m_pkthdr.csum_data = 0xFFFF;
332 	mpf->m_pkthdr.flowid = sgc->rss_hash;
334 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
336 	(*ifp->if_input)(ifp, mpf);
	/* Rearm RX buffers once enough are on the free list. */
338 	if (sdsp->rx_free > ha->std_replenish)
339 		qla_replenish_normal_rx(ha, sdsp, r_idx);
/*
 * qla_rcv_cont_sds
 *	Walk `dcount` continuation status descriptors that follow a
 *	multi-descriptor SGL receive/LRO completion, extracting the buffer
 *	handles each one carries (1..7 per descriptor) into `handle[]`.
 *	The total number of handles copied is accumulated into *nhandles.
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
345 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
346 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
349 	uint16_t num_handles;
350 	q80_stat_desc_t *sdesc;
356 	for (i = 0; i < dcount; i++) {
		/* Ring index wraps at NUM_STATUS_DESCRIPTORS (power of 2). */
357 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
358 		sdesc = (q80_stat_desc_t *)
359 			&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
		/* Continuation descriptors are expected to carry a specific
		 * opcode; a zero/unexpected opcode means the hardware has
		 * not written this slot — log and (elided) error out. */
361 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
364 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
365 				__func__, (void *)sdesc->data[0],
366 				(void *)sdesc->data[1]);
370 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
		/* NOTE(review): message says "opcode=0" but this branch
		 * appears to report a bad num_handles — likely copy-paste;
		 * runtime string left untouched here. */
372 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
373 				__func__, (void *)sdesc->data[0],
374 				(void *)sdesc->data[1]);
378 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
		/*
		 * Handles 1-4 live in data[0], handles 5-7 in data[1].
		 * Each case copies exactly num_handles of them, in order.
		 */
381 		switch (num_handles) {
384 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
388 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
389 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
393 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
394 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
395 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
399 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
400 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
401 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
402 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
406 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
407 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
408 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
409 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
410 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
414 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
415 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
416 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
417 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
418 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
419 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
423 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
424 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
425 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
427 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
428 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
429 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
			/* default: descriptor advertises an impossible count. */
433 			device_printf(ha->pci_dev,
434 				"%s: invalid num handles %p %p\n",
435 				__func__, (void *)sdesc->data[0],
436 				(void *)sdesc->data[1]);
		/* NOTE(review): format label is "[nh, sds, ...]" but the
		 * arguments are passed sds_idx then num_handles — the two
		 * appear swapped relative to the label; confirm upstream. */
439 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
440 			__func__, "invalid num handles", sds_idx, num_handles,
441 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
	/* Report the grand total of handles harvested to the caller. */
446 	*nhandles = *nhandles + num_handles;
453 * Function: Main Interrupt Service Routine
/*
 * qla_rcv_isr
 *	Drain up to `count` completions from status ring `sds_idx`:
 *	read each status descriptor, dispatch on its opcode (single-buffer
 *	receive, scatter-gather receive, scatter-gather LRO), then zero the
 *	consumed descriptor(s) and advance/publish the consumer index.
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
456 qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
460 	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
461 	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
465 	uint32_t sds_replenish_threshold = 0;
	/* Mark this ring busy; bail immediately if a stop is pending. */
472 	hw->sds[sds_idx].rcv_active = 1;
473 	if (ha->flags.stop_rcv) {
474 		hw->sds[sds_idx].rcv_active = 0;
478 	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));
	/* Resume from where the previous invocation left off. */
483 	comp_idx = hw->sds[sds_idx].sdsr_next;
485 	while (count-- && !ha->flags.stop_rcv) {
487 		sdesc = (q80_stat_desc_t *)
488 			&hw->sds[sds_idx].sds_ring_base[comp_idx];
490 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
495 		hw->sds[sds_idx].intr_count++;
		/* ---- single-buffer receive completion ---- */
498 		case Q8_STAT_DESC_OPCODE_RCV_PKT:
502 			bzero(&sgc, sizeof(qla_sgl_comp_t));
			/* Unpack length, handle, checksum status, RSS hash
			 * and (if present) VLAN id from the descriptor. */
505 				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
506 			sgc.rcv.num_handles = 1;
508 				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
509 			sgc.rcv.chksum_status =
510 				Q8_STAT_DESC_STATUS((sdesc->data[1]));
513 				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
515 			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
517 					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
519 			qla_rx_intr(ha, &sgc.rcv, sds_idx);
		/* ---- scatter-gather receive completion ---- */
522 		case Q8_STAT_DESC_OPCODE_SGL_RCV:
525 				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));
			/*
			 * Multi-descriptor completion: the last descriptor
			 * of the group must carry the CONT opcode, i.e. the
			 * hardware has finished writing the whole group.
			 */
527 			if (desc_count > 1) {
528 				c_idx = (comp_idx + desc_count -1) &
529 					(NUM_STATUS_DESCRIPTORS-1);
530 				sdesc0 = (q80_stat_desc_t *)
531 					&hw->sds[sds_idx].sds_ring_base[c_idx];
533 				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
534 						Q8_STAT_DESC_OPCODE_CONT) {
540 			bzero(&sgc, sizeof(qla_sgl_comp_t));
543 				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
545 			sgc.rcv.chksum_status =
546 				Q8_STAT_DESC_STATUS((sdesc->data[1]));
549 				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
551 			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
553 					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
556 			QL_ASSERT(ha, (desc_count <= 2) ,\
557 				("%s: [sds_idx, data0, data1]="\
558 				"%d, %p, %p]\n", __func__, sds_idx,\
559 				(void *)sdesc->data[0],\
560 				(void *)sdesc->data[1]));
			/* First handle lives in this descriptor; the rest
			 * come from the continuation descriptors. */
562 			sgc.rcv.num_handles = 1;
564 				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
566 			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
567 				&sgc.rcv.handle[1], &nhandles)) {
569 					"%s: [sds_idx, dcount, data0, data1]="
570 					"[%d, %d, 0x%llx, 0x%llx]\n",
571 					__func__, sds_idx, desc_count,
572 					(long long unsigned int)sdesc->data[0],
573 					(long long unsigned int)sdesc->data[1]);
578 			sgc.rcv.num_handles += nhandles;
580 			qla_rx_intr(ha, &sgc.rcv, sds_idx);
		/* ---- scatter-gather LRO completion ---- */
584 		case Q8_STAT_DESC_OPCODE_SGL_LRO:
587 				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));
			/* Same continuation-descriptor readiness check as
			 * the SGL_RCV case above. */
589 			if (desc_count > 1) {
590 				c_idx = (comp_idx + desc_count -1) &
591 					(NUM_STATUS_DESCRIPTORS-1);
592 				sdesc0 = (q80_stat_desc_t *)
593 					&hw->sds[sds_idx].sds_ring_base[c_idx];
595 				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
596 						Q8_STAT_DESC_OPCODE_CONT) {
601 			bzero(&sgc, sizeof(qla_sgl_comp_t));
603 			sgc.lro.payload_length =
604 			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));
607 				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
609 			sgc.lro.num_handles = 1;
611 				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			/* Record timestamp-option and PUSH-bit hints used by
			 * qla_lro_intr to reconstruct the TCP header. */
613 			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
614 				sgc.lro.flags |= Q8_LRO_COMP_TS;
616 			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
617 				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;
620 				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
622 				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));
624 			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
626 					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
629 			QL_ASSERT(ha, (desc_count <= 7) ,\
630 				("%s: [sds_idx, data0, data1]="\
631 				"[%d, 0x%llx, 0x%llx]\n",\
633 				(long long unsigned int)sdesc->data[0],\
634 				(long long unsigned int)sdesc->data[1]));
636 			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
637 				desc_count, &sgc.lro.handle[1], &nhandles)) {
639 					"%s: [sds_idx, data0, data1]="\
640 					"[%d, 0x%llx, 0x%llx]\n",\
642 					(long long unsigned int)sdesc->data[0],\
643 					(long long unsigned int)sdesc->data[1]);
649 			sgc.lro.num_handles += nhandles;
			/* qla_lro_intr failure: dump descriptor state for
			 * both the head and (if any) tail descriptors. */
651 			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
653 					"%s: [sds_idx, data0, data1]="\
654 					"[%d, 0x%llx, 0x%llx]\n",\
656 					(long long unsigned int)sdesc->data[0],\
657 					(long long unsigned int)sdesc->data[1]);
659 					"%s: [comp_idx, c_idx, dcount, nhndls]="\
660 					"[%d, %d, %d, %d]\n",\
661 					__func__, comp_idx, c_idx, desc_count,
662 					sgc.lro.num_handles);
663 				if (desc_count > 1) {
665 					"%s: [sds_idx, data0, data1]="\
666 					"[%d, 0x%llx, 0x%llx]\n",\
668 					(long long unsigned int)sdesc0->data[0],\
669 					(long long unsigned int)sdesc0->data[1]);
			/* default: unrecognized opcode — log and move on. */
676 			device_printf(dev, "%s: default 0x%llx!\n", __func__,
677 				(long long unsigned int)sdesc->data[0]);
		/* Account for all descriptors consumed by this completion. */
684 		sds_replenish_threshold += desc_count;
		/* Zero every consumed descriptor so a stale opcode is never
		 * re-read, then advance the local consumer index. */
687 		while (desc_count--) {
688 			sdesc->data[0] = 0ULL;
689 			sdesc->data[1] = 0ULL;
690 			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
691 			sdesc = (q80_stat_desc_t *)
692 				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		/* Publish the consumer index to hardware periodically (every
		 * sds_cidx_thres descriptors) rather than per completion. */
695 		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
696 			sds_replenish_threshold = 0;
697 			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
698 				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
701 			hw->sds[sds_idx].sdsr_next = comp_idx;
705 	if (ha->flags.stop_rcv)
706 		goto qla_rcv_isr_exit;
	/* Final consumer-index publish; otherwise count the wakeup as a
	 * spurious interrupt (no descriptors were processed). */
708 	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
709 		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
710 	hw->sds[sds_idx].sdsr_next = comp_idx;
712 		hw->sds[sds_idx].spurious_intr_count++;
714 		if (ha->hw.num_rds_rings > 1)
717 		sdsp = &ha->hw.sds[sds_idx];
719 		if (sdsp->rx_free > ha->std_replenish)
720 			qla_replenish_normal_rx(ha, sdsp, r_idx);
	/* Peek the next descriptor: a nonzero opcode means more work is
	 * already pending (presumably drives the return value — elided). */
723 	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
724 	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
730 	hw->sds[sds_idx].rcv_active = 0;
/*
 * ql_mbx_isr
 *	Mailbox / asynchronous-event (AEN) interrupt handler.  Verifies a
 *	firmware mailbox interrupt is actually pending, decodes the AEN code
 *	from mailbox 0, handles link-state and SFP events, then acks the
 *	mailbox and re-enables the mailbox interrupt.
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
736 ql_mbx_isr(void *arg)
740 	uint32_t prev_link_state;
745 		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
	/* Bits [1:0] == 01 means firmware posted a mailbox interrupt;
	 * anything else: just unmask and return. */
749 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
750 	if ((data & 0x3) != 0x1) {
751 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
755 	data = READ_REG32(ha, Q8_FW_MBOX0);
	/* AEN codes are 0x8xxx; ignore everything else. */
757 	if ((data & 0xF000) != 0x8000)
760 	data = data & 0xFFFF;
764 	case 0x8001:  /* It's an AEN */
		/* Link event: mailboxes 1..4 carry cable OUI/length, link
		 * speed, link state, duplex/autoneg and loopback/faults. */
766 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
768 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
769 		ha->hw.cable_length = data & 0xFFFF;
772 		ha->hw.link_speed = data & 0xFFF;
774 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
		/* Only notify the stack when the link state changed. */
776 		prev_link_state =  ha->hw.link_up;
777 		ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
779 		if (prev_link_state !=  ha->hw.link_up) {
781 				if_link_state_change(ha->ifp, LINK_STATE_UP);
783 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
787 		ha->hw.module_type = ((data >> 8) & 0xFF);
788 		ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
789 		ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
791 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
792 		ha->hw.flags.loopback_mode = data & 0x03;
794 		ha->hw.link_faults = (data >> 3) & 0xFF;
		/* AEN 0x8101 (elided case label): stash mailboxes 1..4 for
		 * deferred processing by the driver. */
804 		ha->hw.aen_mb0 = 0x8101;
805 		ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
806 		ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
807 		ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
808 		ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
812 		/* for now just dump the registers */
816 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
817 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
818 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
819 			ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
820 			ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
822 			device_printf(ha->pci_dev, "%s: "
823 				"0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
824 				__func__, data, ombx[0], ombx[1], ombx[2],
831 		/* sfp insertion aen */
832 		device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
833 			__func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
837 		/* sfp removal aen */
838 		device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
		/* Another informational AEN (case label elided): dump the
		 * first three outbound mailboxes. */
845 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
846 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
847 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
849 			device_printf(ha->pci_dev, "%s: "
850 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
851 				__func__, data, ombx[0], ombx[1], ombx[2]);
		/* default: unrecognized AEN code — log it. */
856 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
	/* Ack the mailbox interrupt and unmask for the next one. */
859 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
860 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
/*
 * qla_replenish_normal_rx
 *	Rearm the RX descriptor ring for rds ring `r_idx`: pop recycled
 *	buffer descriptors off the per-SDS free list, attach a fresh mbuf
 *	to each (ql_get_mbuf), write them back into the hardware receive
 *	ring, and publish the producer index to the chip in batches of
 *	rds_pidx_thres (plus a final update if anything was posted).
 * NOTE(review): this view of the source is elided (non-contiguous original
 * line numbers); comments describe only the statements visible here.
 */
866 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
869 	int count = sdsp->rx_free;
873 	/* we can play with this value via a sysctl */
874 	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;
876 	rdesc = &ha->hw.rds[r_idx];
	/* Remember the starting producer position so we only ring the
	 * doorbell at the end if something was actually posted. */
878 	rx_next = rdesc->rx_next;
881 		rxb = sdsp->rxb_free;
		/* Unlink the head of the free list. */
886 		sdsp->rxb_free = rxb->next;
		/* Fresh mbuf obtained: hand the buffer's DMA address and
		 * length to the hardware receive descriptor at rx_in. */
889 		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
890 			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
892 				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			/* Both indices wrap at NUM_RX_DESCRIPTORS. */
894 			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
897 			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
			/* mbuf allocation failed: log and push the buffer
			 * descriptor back onto the free list for later. */
900 			device_printf(ha->pci_dev,
901 				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
902 				__func__, r_idx, rdesc->rx_in, rxb->handle);
905 			rxb->next = sdsp->rxb_free;
906 			sdsp->rxb_free = rxb;
		/* Batch doorbell: publish the producer index once every
		 * rds_pidx_thres replenished buffers. */
911 		if (replenish_thresh-- == 0) {
912 			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
914 			rx_next = rdesc->rx_next;
915 			replenish_thresh = ha->hw.rds_pidx_thres;
	/* Final doorbell for any buffers posted since the last batch. */
919 	if (rx_next != rdesc->rx_next) {
920 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
928 qla_ivec_t *ivec = arg;
939 if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
943 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
945 ret = qla_rcv_isr(ha, idx, -1);
948 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
950 if (!ha->flags.stop_rcv) {
951 QL_ENABLE_INTERRUPTS(ha, idx);