/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);

static int qla_minidump_init(qla_host_t *ha);
static void qla_minidump_free(qla_host_t *ha);


static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->hw.num_sds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.sds[i].intr_count);

                for (i = 0; i < ha->hw.num_tx_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: tx[%d] = %p\n", __func__, i,
                                (void *)ha->tx_ring[i].count);

                for (i = 0; i < ha->hw.num_rds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: rds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.rds[i].count);

                device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
                        (void *)ha->lro_pkt_count);

                device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
                        (void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
                device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
                        (void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        }
        return (err);
}

static int
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qla_get_quick_stats(ha);
        }
        return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                (void)QLA_LOCK(ha, __func__, 0);
                qla_stop_pegs(ha);
                QLA_UNLOCK(ha, __func__);
        }

        return err;
}
#endif /* #ifdef QL_DBG */

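/*
 * The value written to the "port_cfg" sysctl packs three nibbles, which
 * qla_sysctl_port_cfg() below decodes as:
 *      bits 0-3 : DCBX enable (0 or 1)
 *      bits 4-7 : pause config (0 = none, 1 = standard, 2 = ppm)
 *      bits 8-11: standard-pause direction (0 = xmt+rcv, 1 = xmt, 2 = rcv)
 * For example, a value of 0x011 requests DCBX enabled with standard pause
 * in both directions. Values outside these ranges fail validation and the
 * handler falls back to reading the current port configuration instead.
 */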
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {

                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                err = qla_set_port_config(ha, cfg_bits);
        } else {
                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);
        }

qla_sysctl_set_port_cfg_exit:
        return err;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->hw.num_sds_rings = MAX_SDS_RINGS;
        ha->hw.num_rds_rings = MAX_RDS_RINGS;
        ha->hw.num_tx_rings = NUM_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_quick_stats, "I", "Quick Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

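        /*
         * Default receive interrupt moderation: interrupt after 256
         * packets or 3 microseconds, whichever occurs first (bits 15:0
         * hold max packets, bits 31:16 max microseconds, per the sysctl
         * help text below).
         */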
        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

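        /*
         * Default transmit interrupt moderation: interrupt after 64
         * packets or 64 microseconds, whichever occurs first (same
         * encoding as rcv_intr_coalesce above).
         */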
        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value matches the"
                        " bit layout below, otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump Utility is Active\n"
                "\t 0 = Minidump Utility is not active\n"
                "\t 1 = Minidump Utility is retrieved on this port\n"
                "\t 2 = Minidump Utility is retrieved on the other port\n");

        ha->hw.mdump_start = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_start", CTLFLAG_RW,
                &ha->hw.mdump_start, ha->hw.mdump_start,
                "Minidump Utility can start minidump process");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

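        /*
         * link_speed values appear to be in Mbps (0x3E8 = 1000,
         * 0x64 = 100); 0x710 is the value this driver treats as a
         * 10 Gbps link.
         */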
        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        qla_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

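        /*
         * One contiguous DMA buffer backs all transmit rings: carve it
         * into num_tx_rings rings of tx_ring_size bytes each, then use
         * the space after the rings for a 32-bit consumer index per
         * ring (the extra PAGE_SIZE in the allocation above leaves room
         * for these).
         */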
        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

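/*
 * Mailbox handshake, as implemented below: poll Q8_HOST_MBOX_CNTRL until it
 * reads 0 (non-zero means the previous command still owns the mailbox),
 * write the n_hmbox command words to Q8_HOST_MBOX0.., then write 1 to
 * Q8_HOST_MBOX_CNTRL to signal the firmware. Completion is polled through
 * Q8_FW_MBOX_CNTRL; replies whose Q8_FW_MBOX0 bits 15:12 equal 0x8 are
 * skipped (presumably async event notifications rather than the command
 * response). On success the n_fwmbox reply words are copied out, and the
 * firmware mailbox control register and mailbox interrupt mask are cleared.
 * Any timeout or injected failure sets qla_initiate_recovery.
 */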
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

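        /*
         * Besides the opcode, mbox[0] appears to encode the 2-word
         * command length (bits 23:16) and a command version of 2
         * (bits 31:29); the response status is read back from
         * bits 31:25 of mbox[0] below.
         */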
        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

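        /*
         * On create, MSI-X vectors are assigned starting at
         * start_idx + 1 (vector 0 is presumably reserved for the
         * mailbox/async-event interrupt); on delete, the interrupt ids
         * previously returned by the firmware are passed back instead.
         */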
        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
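/* 40-byte RSS (Toeplitz) hash key, stored as five 64-bit words. */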
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        q80_config_rss_t        *c_rss;
        q80_config_rss_rsp_t    *c_rss_rsp;
        uint32_t                err, i;
        device_t                dev = ha->pci_dev;

        c_rss = (q80_config_rss_t *)ha->hw.mbox;
        bzero(c_rss, (sizeof (q80_config_rss_t)));

        c_rss->opcode = Q8_MBX_CONFIG_RSS;

        c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
        c_rss->count_version |= Q8_MBX_CMD_VERSION;

        c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
                                Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
        //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
        //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

        c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

        c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

        c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

        c_rss->cntxt_id = cntxt_id;

        for (i = 0; i < 5; i++) {
                c_rss->rss_key[i] = rss_key[i];
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
                (sizeof (q80_config_rss_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        if ((count > Q8_RSS_IND_TBL_SIZE) ||
                ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
                device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
                        start_idx, count);
                return (-1);
        }

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

        c_rss_ind->start_idx = start_idx;
        c_rss_ind->end_idx = start_idx + count - 1;
        c_rss_ind->cntxt_id = cntxt_id;
        bcopy(ind_table, c_rss_ind->ind_table, count);

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
                (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
                (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
        err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
        int rcv)
{
        q80_config_intr_coalesc_t       *intrc;
        q80_config_intr_coalesc_rsp_t   *intrc_rsp;
        uint32_t                        err, i;
        device_t                        dev = ha->pci_dev;

        intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
        bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

        intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
        intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
        intrc->count_version |= Q8_MBX_CMD_VERSION;

        if (rcv) {
                intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
                intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
        } else {
                intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
                intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
        }

        intrc->cntxt_id = cntxt_id;

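        /*
         * tenable additionally arms a periodic interrupt timer across
         * all SDS rings (ms_timeout = 1000); the flag names suggest the
         * firmware fires the interrupt periodically regardless of
         * traffic.
         */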
        if (tenable) {
                intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
                intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        intrc->sds_ring_mask |= (1 << i);
                }
                intrc->ms_timeout = 1000;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)intrc,
                (sizeof (q80_config_intr_coalesc_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

/*
 * Name: qla_config_mac_addr
 * Function: Binds a MAC address to the context/interface.
 *      Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
        q80_config_mac_addr_t           *cmac;
        q80_config_mac_addr_rsp_t       *cmac_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
        bzero(cmac, (sizeof (q80_config_mac_addr_t)));

        cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
        cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
        cmac->count_version |= Q8_MBX_CMD_VERSION;

        if (add_mac)
                cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
        else
                cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

        cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

        cmac->nmac_entries = 1;
        cmac->cntxt_id = ha->hw.rcv_cntxt_id;
        bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

        if (qla_mbx_cmd(ha, (uint32_t *)cmac,
                (sizeof (q80_config_mac_addr_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: %s failed0\n", __func__,
                        (add_mac ? "Add" : "Del"));
                return (-1);
        }
        cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: %s "
                        "%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
                        __func__, (add_mac ? "Add" : "Del"),
                        mac_addr[0], mac_addr[1], mac_addr[2],
                        mac_addr[3], mac_addr[4], mac_addr[5], err);
                return (-1);
        }

        return 0;
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
        q80_config_mac_rcv_mode_t       *rcv_mode;
        uint32_t                        err;
        q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
        device_t                        dev = ha->pci_dev;

        rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
        bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

        rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
        rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
        rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

        rcv_mode->mode = mode;

        rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
                (sizeof (q80_config_mac_rcv_mode_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

int
ql_set_promisc(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *      Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
        device_t                dev;
        q80_set_max_mtu_t       *max_mtu;
        q80_set_max_mtu_rsp_t   *max_mtu_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
        bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

        max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
        max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
        max_mtu->count_version |= Q8_MBX_CMD_VERSION;

        max_mtu->cntxt_id = cntxt_id;
        max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
                (sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_link_event_t        *lnk;
        q80_link_event_rsp_t    *lnk_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        lnk = (q80_link_event_t *)ha->hw.mbox;
        bzero(lnk, (sizeof (q80_link_event_t)));

        lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
        lnk->count_version = (sizeof (q80_link_event_t) >> 2);
        lnk->count_version |= Q8_MBX_CMD_VERSION;

        lnk->cntxt_id = cntxt_id;
        lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_config_fw_lro_t     *fw_lro;
        q80_config_fw_lro_rsp_t *fw_lro_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
        bzero(fw_lro, sizeof(q80_config_fw_lro_t));

        fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
        fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
        fw_lro->count_version |= Q8_MBX_CMD_VERSION;

        fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
        fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

        fw_lro->cntxt_id = cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
                (sizeof (q80_config_fw_lro_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
{
        device_t dev = ha->pci_dev;

        if (i < ha->hw.num_tx_rings) {
                device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_bytes);
                device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_pkts);
                device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
                        __func__, i, xstat->errors);
                device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
                        __func__, i, xstat->pkts_dropped);
                device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->switch_pkts);
                device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
                        __func__, i, xstat->num_buffers);
        } else {
                device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_bytes);
                device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_pkts);
                device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
                        __func__, xstat->errors);
                device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
                        __func__, xstat->pkts_dropped);
                device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->switch_pkts);
                device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
                        __func__, xstat->num_buffers);
        }
}

static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_pkts);
        device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->lro_pkt_count);
        device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
                rstat->sw_pkt_count);
        device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
                rstat->ip_chksum_err);
        device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
                rstat->pkts_wo_acntxts);
        device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_card);
        device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_host);
        device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
                rstat->oversized_pkts);
        device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_rds);
        device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
                __func__, rstat->unxpctd_mcast_pkts);
        device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
                rstat->re1_fbq_error);
        device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
                rstat->invalid_mac_addr);
        device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_trys);
        device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_success);
        device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_added);
        device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_deleted);
        device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_active);
        device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_droped_unknown);
}

static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_frames);
        device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bytes);
        device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_mcast_pkts);
        device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bcast_pkts);
        device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_pause_frames);
        device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_cntrl_pkts);
        device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_64bytes);
        device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_127bytes);
        device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_255bytes);
        device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_511bytes);
        device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_1023bytes);
        device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_1518bytes);
        device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_gt_1518bytes);

        device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_frames);
        device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_bytes);
        device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_mcast_pkts);
        device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_bcast_pkts);
        device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_pause_frames);
        device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_cntrl_pkts);
        device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_64bytes);
        device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_127bytes);
        device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_255bytes);
        device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_511bytes);
        device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_1023bytes);
        device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_1518bytes);
        device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_gt_1518bytes);

        device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_error);
        device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_small);
        device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_large);
        device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_jabber);
        device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_dropped);
        device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
                mstat->fcs_error);
        device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
                mstat->align_error);
}
1451
1452
1453 static int
1454 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1455 {
1456         device_t                dev;
1457         q80_get_stats_t         *stat;
1458         q80_get_stats_rsp_t     *stat_rsp;
1459         uint32_t                err;
1460
1461         dev = ha->pci_dev;
1462
1463         stat = (q80_get_stats_t *)ha->hw.mbox;
1464         bzero(stat, (sizeof (q80_get_stats_t)));
1465
1466         stat->opcode = Q8_MBX_GET_STATS;
1467         stat->count_version = 2;
1468         stat->count_version |= Q8_MBX_CMD_VERSION;
1469
1470         stat->cmd = cmd;
1471
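        /*
         * The request itself is two mailbox dwords (count_version + cmd);
         * the response size varies with the statistics type requested, so
         * the caller supplies rsp_size and its dword count (rsp_size >> 2)
         * is passed to qla_mbx_cmd().
         */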
1472         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1473                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1474                 device_printf(dev, "%s: failed\n", __func__);
1475                 return -1;
1476         }
1477
1478         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1479
1480         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1481
1482         if (err) {
1483                 return -1;
1484         }
1485
1486         return 0;
1487 }
1488
1489 void
1490 ql_get_stats(qla_host_t *ha)
1491 {
1492         q80_get_stats_rsp_t     *stat_rsp;
1493         q80_mac_stats_t         *mstat;
1494         q80_xmt_stats_t         *xstat;
1495         q80_rcv_stats_t         *rstat;
1496         uint32_t                cmd;
1497         int                     i;
1498
1499         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1500         /*
1501          * Get MAC Statistics
1502          */
1503         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1504 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1505
1506         cmd |= ((ha->pci_func & 0x1) << 16);
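        /*
         * The upper 16 bits of cmd select the target: the pci function
         * here, and a context id for the per-ring queries below.
         */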
1507
1508         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1509                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1510                 qla_mac_stats(ha, mstat);
1511         } else {
1512                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1513                         __func__, ha->hw.mbox[0]);
1514         }
1515         /*
1516          * Get RCV Statistics
1517          */
1518         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1519 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1520         cmd |= (ha->hw.rcv_cntxt_id << 16);
1521
1522         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1523                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1524                 qla_rcv_stats(ha, rstat);
1525         } else {
1526                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1527                         __func__, ha->hw.mbox[0]);
1528         }
1529         /*
1530          * Get XMT Statistics
1531          */
1532         for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1533                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1534 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
1535                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1536
1537                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1538                         == 0) {
1539                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1540                         qla_xmt_stats(ha, xstat, i);
1541                 } else {
1542                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1543                                 __func__, ha->hw.mbox[0]);
1544                 }
1545         }
1546         return;
1547 }
1548
1549 static void
1550 qla_get_quick_stats(qla_host_t *ha)
1551 {
1552         q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1553         q80_mac_stats_t         *mstat;
1554         q80_xmt_stats_t         *xstat;
1555         q80_rcv_stats_t         *rstat;
1556         uint32_t                cmd;
1557
1558         stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1559
1560         cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1561 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1562
1563 //      cmd |= ((ha->pci_func & 0x3) << 16);
1564         cmd |= (0xFFFF << 16);
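        /*
         * With Q8_GET_STATS_CMD_TYPE_ALL the function-select field is set
         * to 0xFFFF, presumably a wildcard covering every function; the
         * per-function form is left commented out above.
         */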
1565
1566         if (qla_get_hw_stats(ha, cmd,
1567                         sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1568
1569                 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1570                 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1571                 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1572                 qla_mac_stats(ha, mstat);
1573                 qla_rcv_stats(ha, rstat);
1574                 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1575         } else {
1576                 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1577                         __func__, ha->hw.mbox[0]);
1578         }
1579         return;
1580 }
1581
1582 /*
1583  * Name: qla_tx_tso
1584  * Function: Checks if the packet to be transmitted is a candidate for
1585  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1586  *      Ring Structure are plugged in.
1587  */
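/*
 * Return values (as consumed by ql_hw_send()):
 *       0 - TSO candidate; the complete header resides in the first mbuf
 *       1 - TSO candidate; the header spans mbufs and has been copied into
 *           'hdr', so the caller must transmit the header from that copy
 *      -1 - not a TSO candidate (not TCP, IPv4 options present, or neither
 *           IPv4 nor IPv6)
 */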
1588 static int
1589 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1590 {
1591         struct ether_vlan_header *eh;
1592         struct ip *ip = NULL;
1593         struct ip6_hdr *ip6 = NULL;
1594         struct tcphdr *th = NULL;
1595         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1596         uint16_t etype, opcode, offload = 1;
1597         device_t dev;
1598
1599         dev = ha->pci_dev;
1600
1601
1602         eh = mtod(mp, struct ether_vlan_header *);
1603
1604         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1605                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1606                 etype = ntohs(eh->evl_proto);
1607         } else {
1608                 ehdrlen = ETHER_HDR_LEN;
1609                 etype = ntohs(eh->evl_encap_proto);
1610         }
1611
1612         hdrlen = 0;
1613
1614         switch (etype) {
1615                 case ETHERTYPE_IP:
1616
1617                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
1618                                         sizeof(struct tcphdr);
1619
1620                         if (mp->m_len < tcp_opt_off) {
1621                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1622                                 ip = (struct ip *)(hdr + ehdrlen);
1623                         } else {
1624                                 ip = (struct ip *)(mp->m_data + ehdrlen);
1625                         }
1626
1627                         ip_hlen = ip->ip_hl << 2;
1628                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1629
1630                                 
1631                         if ((ip->ip_p != IPPROTO_TCP) ||
1632                                 (ip_hlen != sizeof (struct ip))){
1633                                 /* IP Options are not supported */
1634
1635                                 offload = 0;
1636                         } else
1637                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1638
1639                 break;
1640
1641                 case ETHERTYPE_IPV6:
1642
1643                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1644                                         sizeof (struct tcphdr);
1645
1646                         if (mp->m_len < tcp_opt_off) {
1647                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1648                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1649                         } else {
1650                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1651                         }
1652
1653                         ip_hlen = sizeof(struct ip6_hdr);
1654                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1655
1656                         if (ip6->ip6_nxt != IPPROTO_TCP) {
1657                                 //device_printf(dev, "%s: ipv6\n", __func__);
1658                                 offload = 0;
1659                         } else
1660                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1661                 break;
1662
1663                 default:
1664                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1665                         offload = 0;
1666                 break;
1667         }
1668
1669         if (!offload)
1670                 return (-1);
1671
1672         tcp_hlen = th->th_off << 2;
1673         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1674
1675         if (mp->m_len < hdrlen) {
1676                 if (mp->m_len < tcp_opt_off) {
1677                         if (tcp_hlen > sizeof(struct tcphdr)) {
1678                                 m_copydata(mp, tcp_opt_off,
1679                                         (tcp_hlen - sizeof(struct tcphdr)),
1680                                         &hdr[tcp_opt_off]);
1681                         }
1682                 } else {
1683                         m_copydata(mp, 0, hdrlen, hdr);
1684                 }
1685         }
1686
1687         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1688
1689         tx_cmd->flags_opcode = opcode;
1690         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1691         tx_cmd->total_hdr_len = hdrlen;
1692
1693         /* multicast: I/G bit (LSB of the first byte of the dest MAC) set */
1694         if (eh->evl_dhost[0] & 0x01) {
1695                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1696         }
1697
1698         if (mp->m_len < hdrlen) {
1699                 QL_DPRINT8(ha, (dev, "%s: hdrlen = %d\n", __func__, hdrlen));
1700                 return (1);
1701         }
1702
1703         return (0);
1704 }
1705
1706 /*
1707  * Name: qla_tx_chksum
1708  * Function: Checks if the packet to be transmitted is a candidate for
1709  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1710  *      Ring Structure are plugged in.
1711  */
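/*
 * Returns 0 and fills in *op_code and *tcp_hdr_off when the frame can be
 * checksum-offloaded; returns -1 when it cannot (no CSUM_TCP/CSUM_UDP
 * flag set, or a protocol the hardware does not handle).
 */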
1712 static int
1713 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1714         uint32_t *tcp_hdr_off)
1715 {
1716         struct ether_vlan_header *eh;
1717         struct ip *ip;
1718         struct ip6_hdr *ip6;
1719         uint32_t ehdrlen, ip_hlen;
1720         uint16_t etype, opcode, offload = 1;
1721         device_t dev;
1722         uint8_t buf[sizeof(struct ip6_hdr)];
1723
1724         dev = ha->pci_dev;
1725
1726         *op_code = 0;
1727
1728         if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1729                 return (-1);
1730
1731         eh = mtod(mp, struct ether_vlan_header *);
1732
1733         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1734                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1735                 etype = ntohs(eh->evl_proto);
1736         } else {
1737                 ehdrlen = ETHER_HDR_LEN;
1738                 etype = ntohs(eh->evl_encap_proto);
1739         }
1740
1741                 
1742         switch (etype) {
1743                 case ETHERTYPE_IP:
1744                         ip = (struct ip *)(mp->m_data + ehdrlen);
1745
1746                         ip_hlen = sizeof (struct ip);
1747
1748                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1749                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1750                                 ip = (struct ip *)buf;
1751                         }
1752
1753                         if (ip->ip_p == IPPROTO_TCP)
1754                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1755                         else if (ip->ip_p == IPPROTO_UDP)
1756                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1757                         else {
1758                                 //device_printf(dev, "%s: ipv4\n", __func__);
1759                                 offload = 0;
1760                         }
1761                 break;
1762
1763                 case ETHERTYPE_IPV6:
1764                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1765
1766                         ip_hlen = sizeof(struct ip6_hdr);
1767
1768                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1769                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1770                                         buf);
1771                                 ip6 = (struct ip6_hdr *)buf;
1772                         }
1773
1774                         if (ip6->ip6_nxt == IPPROTO_TCP)
1775                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1776                         else if (ip6->ip6_nxt == IPPROTO_UDP)
1777                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1778                         else {
1779                                 //device_printf(dev, "%s: ipv6\n", __func__);
1780                                 offload = 0;
1781                         }
1782                 break;
1783
1784                 default:
1785                         offload = 0;
1786                 break;
1787         }
1788         if (!offload)
1789                 return (-1);
1790
1791         *op_code = opcode;
1792         *tcp_hdr_off = (ip_hlen + ehdrlen);
1793
1794         return (0);
1795 }
1796
1797 #define QLA_TX_MIN_FREE 2
1798 /*
1799  * Name: ql_hw_send
1800  * Function: Transmits a packet. It first checks if the packet is a
1801  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1802  *      offload. If neither criterion is met, it is transmitted as a
1803  *      regular Ethernet frame.
1804  */
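/*
 * Descriptor accounting sketch: one q80_tx_cmd_t carries up to four DMA
 * segments (buf1..buf4), so a plain frame needs ceil(nsegs / 4) command
 * descriptors; e.g. 10 segments -> (10 + 3) >> 2 = 3 descriptors. A TSO
 * frame consumes additional descriptors that carry an inline copy of the
 * packet header behind the first command (see the CSUM_TSO block below).
 */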
1805 int
1806 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1807         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1808 {
1809         struct ether_vlan_header *eh;
1810         qla_hw_t *hw = &ha->hw;
1811         q80_tx_cmd_t *tx_cmd, tso_cmd;
1812         bus_dma_segment_t *c_seg;
1813         uint32_t num_tx_cmds, hdr_len = 0;
1814         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1815         device_t dev;
1816         int i, ret;
1817         uint8_t *src = NULL, *dst = NULL;
1818         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1819         uint32_t op_code = 0;
1820         uint32_t tcp_hdr_off = 0;
1821
1822         dev = ha->pci_dev;
1823
1824         /*
1825          * Always make sure there is at least one empty slot in the tx_ring;
1826          * the tx_ring is considered full when only one entry is available.
1827          */
1828         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
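        /* the >> 2 above hard-codes Q8_TX_CMD_MAX_SEGMENTS == 4 (buf1..buf4) */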
1829
1830         total_length = mp->m_pkthdr.len;
1831         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1832                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1833                         __func__, total_length);
1834                 return (-1);
1835         }
1836         eh = mtod(mp, struct ether_vlan_header *);
1837
1838         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1839
1840                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1841
1842                 src = frame_hdr;
1843                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1844
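                /* accept ret == 0 or ret == 1; any other value (-1) means
                 * the frame is not a TSO candidate and is rejected below */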
1845                 if (!(ret & ~1)) {
1846                         /* find the additional tx_cmd descriptors required */
1847
1848                         if (mp->m_flags & M_VLANTAG)
1849                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1850
1851                         hdr_len = tso_cmd.total_hdr_len;
1852
1853                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1854                         bytes = QL_MIN(bytes, hdr_len);
1855
1856                         num_tx_cmds++;
1857                         hdr_len -= bytes;
1858
1859                         while (hdr_len) {
1860                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1861                                 hdr_len -= bytes;
1862                                 num_tx_cmds++;
1863                         }
1864                         hdr_len = tso_cmd.total_hdr_len;
1865
1866                         if (ret == 0)
1867                                 src = (uint8_t *)eh;
1868                 } else 
1869                         return (EINVAL);
1870         } else {
1871                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1872         }
1873
1874         if (iscsi_pdu)
1875                 ha->hw.iscsi_pkt_count++;
1876
1877         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1878                 qla_hw_tx_done_locked(ha, txr_idx);
1879                 if (hw->tx_cntxt[txr_idx].txr_free <=
1880                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
1881                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1882                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1883                                 __func__));
1884                         return (-1);
1885                 }
1886         }
1887
1888         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1889
1890         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1891
1892                 if (nsegs > ha->hw.max_tx_segs)
1893                         ha->hw.max_tx_segs = nsegs;
1894
1895                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1896
1897                 if (op_code) {
1898                         tx_cmd->flags_opcode = op_code;
1899                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
1900
1901                 } else {
1902                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1903                 }
1904         } else {
1905                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1906                 ha->tx_tso_frames++;
1907         }
1908
1909         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1910                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1911
1912                 if (iscsi_pdu)
1913                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
1914
1915         } else if (mp->m_flags & M_VLANTAG) {
1916
1917                 if (hdr_len) { /* TSO */
1918                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1919                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1920                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1921                 } else
1922                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1923
1924                 ha->hw_vlan_tx_frames++;
1925                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1926
1927                 if (iscsi_pdu) {
1928                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
1929                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
1930                 }
1931         }
1932
1933
1934         tx_cmd->n_bufs = (uint8_t)nsegs;
1935         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1936         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
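        /* the frame length travels as a 24-bit value: low byte in
         * data_len_lo, upper 16 bits in data_len_hi */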
1937         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1938
1939         c_seg = segs;
1940
1941         while (1) {
1942                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1943
1944                         switch (i) {
1945                         case 0:
1946                                 tx_cmd->buf1_addr = c_seg->ds_addr;
1947                                 tx_cmd->buf1_len = c_seg->ds_len;
1948                                 break;
1949
1950                         case 1:
1951                                 tx_cmd->buf2_addr = c_seg->ds_addr;
1952                                 tx_cmd->buf2_len = c_seg->ds_len;
1953                                 break;
1954
1955                         case 2:
1956                                 tx_cmd->buf3_addr = c_seg->ds_addr;
1957                                 tx_cmd->buf3_len = c_seg->ds_len;
1958                                 break;
1959
1960                         case 3:
1961                                 tx_cmd->buf4_addr = c_seg->ds_addr;
1962                                 tx_cmd->buf4_len = c_seg->ds_len;
1963                                 break;
1964                         }
1965
1966                         c_seg++;
1967                         nsegs--;
1968                 }
1969
1970                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
1971                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
1972                                 (NUM_TX_DESCRIPTORS - 1);
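                /* the mask-based wrap assumes NUM_TX_DESCRIPTORS is a
                 * power of 2 */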
1973                 tx_cmd_count++;
1974
1975                 if (!nsegs)
1976                         break;
1977                 
1978                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1979                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1980         }
1981
1982         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1983
1984                 /* TSO : Copy the header in the following tx cmd descriptors */
1985
1986                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
1987
1988                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1989                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1990
1991                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1992                 bytes = QL_MIN(bytes, hdr_len);
1993
1994                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1995
1996                 if (mp->m_flags & M_VLANTAG) {
1997                         /* first copy the src/dst MAC addresses */
1998                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
1999                         dst += (ETHER_ADDR_LEN * 2);
2000                         src += (ETHER_ADDR_LEN * 2);
2001                         
2002                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2003                         dst += 2;
2004                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2005                         dst += 2;
2006
2007                         /* bytes left in src header */
2008                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2009                                         ETHER_VLAN_ENCAP_LEN);
2010
2011                         /* bytes left in TxCmd Entry */
2012                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2013
2014
2015                         bcopy(src, dst, bytes);
2016                         src += bytes;
2017                         hdr_len -= bytes;
2018                 } else {
2019                         bcopy(src, dst, bytes);
2020                         src += bytes;
2021                         hdr_len -= bytes;
2022                 }
2023
2024                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2025                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2026                                         (NUM_TX_DESCRIPTORS - 1);
2027                 tx_cmd_count++;
2028                 
2029                 while (hdr_len) {
2030                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2031                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2032
2033                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2034
2035                         bcopy(src, tx_cmd, bytes);
2036                         src += bytes;
2037                         hdr_len -= bytes;
2038
2039                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2040                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2041                                         (NUM_TX_DESCRIPTORS - 1);
2042                         tx_cmd_count++;
2043                 }
2044         }
2045
2046         hw->tx_cntxt[txr_idx].txr_free =
2047                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2048
2049         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2050                 txr_idx);
2051         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2052
2053         return (0);
2054 }
2055
2056
2057
2058 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2059 static int
2060 qla_config_rss_ind_table(qla_host_t *ha)
2061 {
2062         uint32_t i, count;
2063         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2064
2065
2066         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2067                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2068         }
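        /*
         * The table is filled round-robin; with 4 SDS rings, for example,
         * the 32 entries read 0,1,2,3,0,1,2,3,... so hash buckets spread
         * evenly across the rings.
         */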
2069
2070         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2071                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2072
2073                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2074                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2075                 } else {
2076                         count = Q8_CONFIG_IND_TBL_SIZE;
2077                 }
2078
2079                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2080                         rss_ind_tbl))
2081                         return (-1);
2082         }
2083
2084         return (0);
2085 }
2086
2087 /*
2088  * Name: ql_del_hw_if
2089  * Function: Destroys the hardware-specific entities corresponding to an
2090  *      Ethernet Interface
2091  */
2092 void
2093 ql_del_hw_if(qla_host_t *ha)
2094 {
2095         uint32_t i;
2096         uint32_t num_msix;
2097
2098         (void)qla_stop_nic_func(ha);
2099
2100         qla_del_rcv_cntxt(ha);
2101         qla_del_xmt_cntxt(ha);
2102
2103         if (ha->hw.flags.init_intr_cnxt) {
2104                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2105
2106                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2107                                 num_msix = Q8_MAX_INTR_VECTORS;
2108                         else
2109                                 num_msix = ha->hw.num_sds_rings - i;
2110                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2111
2112                         i += num_msix;
2113                 }
2114
2115                 ha->hw.flags.init_intr_cnxt = 0;
2116         }
2117         return;
2118 }
2119
2120 void
2121 qla_confirm_9kb_enable(qla_host_t *ha)
2122 {
2123         uint32_t supports_9kb = 0;
2124
2125         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2126
2127         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2128         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2129         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2130
2131         qla_get_nic_partition(ha, &supports_9kb, NULL);
2132
2133         if (!supports_9kb)
2134                 ha->hw.enable_9kb = 0;
2135
2136         return;
2137 }
2138
2139
2140 /*
2141  * Name: ql_init_hw_if
2142  * Function: Creates the hardware-specific entities corresponding to an
2143  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2144  *      corresponding to the interface. Enables LRO if allowed.
2145  */
2146 int
2147 ql_init_hw_if(qla_host_t *ha)
2148 {
2149         device_t        dev;
2150         uint32_t        i;
2151         uint8_t         bcast_mac[6];
2152         qla_rdesc_t     *rdesc;
2153         uint32_t        num_msix;
2154
2155         dev = ha->pci_dev;
2156
2157         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2158                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2159                         ha->hw.dma_buf.sds_ring[i].size);
2160         }
2161
2162         for (i = 0; i < ha->hw.num_sds_rings; ) {
2163
2164                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2165                         num_msix = Q8_MAX_INTR_VECTORS;
2166                 else
2167                         num_msix = ha->hw.num_sds_rings - i;
2168
2169                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2170
2171                         if (i > 0) {
2172
2173                                 num_msix = i;
2174
2175                                 for (i = 0; i < num_msix; ) {
2176                                         qla_config_intr_cntxt(ha, i,
2177                                                 Q8_MAX_INTR_VECTORS, 0);
2178                                         i += Q8_MAX_INTR_VECTORS;
2179                                 }
2180                         }
2181                         return (-1);
2182                 }
2183
2184                 i = i + num_msix;
2185         }
2186
2187         ha->hw.flags.init_intr_cnxt = 1;
2188
2189         if (ha->hw.mdump_init == 0) {
2190                 qla_minidump_init(ha);
2191         }
2192
2193         /*
2194          * Create Receive Context
2195          */
2196         if (qla_init_rcv_cntxt(ha)) {
2197                 return (-1);
2198         }
2199
2200         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2201                 rdesc = &ha->hw.rds[i];
2202                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2203                 rdesc->rx_in = 0;
2204                 /* Update the RDS Producer Indices */
2205                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2206                         rdesc->rx_next);
2207         }
2208
2209
2210         /*
2211          * Create Transmit Context
2212          */
2213         if (qla_init_xmt_cntxt(ha)) {
2214                 qla_del_rcv_cntxt(ha);
2215                 return (-1);
2216         }
2217         ha->hw.max_tx_segs = 0;
2218
2219         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2220                 return(-1);
2221
2222         ha->hw.flags.unicast_mac = 1;
2223
2224         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2225         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2226
2227         if (qla_config_mac_addr(ha, bcast_mac, 1))
2228                 return (-1);
2229
2230         ha->hw.flags.bcast_mac = 1;
2231
2232         /*
2233          * program any cached multicast addresses
2234          */
2235         if (qla_hw_add_all_mcast(ha))
2236                 return (-1);
2237
2238         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2239                 return (-1);
2240
2241         if (qla_config_rss_ind_table(ha))
2242                 return (-1);
2243
2244         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2245                 return (-1);
2246
2247         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2248                 return (-1);
2249
2250         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2251                 return (-1);
2252
2253         if (qla_init_nic_func(ha))
2254                 return (-1);
2255
2256         if (qla_query_fw_dcbx_caps(ha))
2257                 return (-1);
2258
2259         for (i = 0; i < ha->hw.num_sds_rings; i++)
2260                 QL_ENABLE_INTERRUPTS(ha, i);
2261
2262         return (0);
2263 }
2264
2265 static int
2266 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2267 {
2268         device_t                dev = ha->pci_dev;
2269         q80_rq_map_sds_to_rds_t *map_rings;
2270         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2271         uint32_t                i, err;
2272         qla_hw_t                *hw = &ha->hw;
2273
2274         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2275         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2276
2277         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2278         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2279         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2280
2281         map_rings->cntxt_id = hw->rcv_cntxt_id;
2282         map_rings->num_rings = num_idx;
2283
2284         for (i = 0; i < num_idx; i++) {
2285                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2286                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2287         }
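        /* the mapping is 1:1: status (SDS) ring i is fed by the
         * like-numbered receive descriptor (RDS) ring */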
2288
2289         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2290                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2291                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2292                 device_printf(dev, "%s: failed0\n", __func__);
2293                 return (-1);
2294         }
2295
2296         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2297
2298         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2299
2300         if (err) {
2301                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2302                 return (-1);
2303         }
2304
2305         return (0);
2306 }
2307
2308 /*
2309  * Name: qla_init_rcv_cntxt
2310  * Function: Creates the Receive Context.
2311  */
2312 static int
2313 qla_init_rcv_cntxt(qla_host_t *ha)
2314 {
2315         q80_rq_rcv_cntxt_t      *rcntxt;
2316         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2317         q80_stat_desc_t         *sdesc;
2318         int                     i, j;
2319         qla_hw_t                *hw = &ha->hw;
2320         device_t                dev;
2321         uint32_t                err;
2322         uint32_t                rcntxt_sds_rings;
2323         uint32_t                rcntxt_rds_rings;
2324         uint32_t                max_idx;
2325
2326         dev = ha->pci_dev;
2327
2328         /*
2329          * Create Receive Context
2330          */
2331
2332         for (i = 0; i < hw->num_sds_rings; i++) {
2333                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2334                         sdesc = (q80_stat_desc_t *)
2335                                 &hw->sds[i].sds_ring_base[j];
2336                         sdesc->data[0] = 1ULL;
2337                         sdesc->data[1] = 1ULL;
2338                 }
2339         }
2340
2341         rcntxt_sds_rings = hw->num_sds_rings;
2342         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2343                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2344
2345         rcntxt_rds_rings = hw->num_rds_rings;
2346
2347         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2348                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2349
2350         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2351         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2352
2353         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2354         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2355         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2356
2357         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2358                         Q8_RCV_CNTXT_CAP0_LRO |
2359                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2360                         Q8_RCV_CNTXT_CAP0_RSS |
2361                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2362
2363         if (ha->hw.enable_9kb)
2364                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2365         else
2366                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2367
2368         if (ha->hw.num_rds_rings > 1) {
2369                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2370                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2371         } else
2372                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
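        /*
         * nrds_sets_rings packs the number of RDS sets into the low bits
         * with (1 << 5) OR'd in. Judging from the identical usage in
         * qla_add_rcv_rings(), the field starting at bit 5 appears to be
         * the ring count per set (one here); that reading is an inference.
         */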
2373
2374         rcntxt->nsds_rings = rcntxt_sds_rings;
2375
2376         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2377
2378         rcntxt->rcv_vpid = 0;
2379
2380         for (i = 0; i <  rcntxt_sds_rings; i++) {
2381                 rcntxt->sds[i].paddr =
2382                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2383                 rcntxt->sds[i].size =
2384                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
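                /*
                 * With only two MSI-X vectors, every SDS ring shares vector
                 * intr_id[0] and is distinguished by its interrupt source
                 * bit; otherwise each ring gets its own vector.
                 */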
2385                 if (ha->msix_count == 2) {
2386                         rcntxt->sds[i].intr_id =
2387                                 qla_host_to_le16(hw->intr_id[0]);
2388                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2389                 } else {
2390                         rcntxt->sds[i].intr_id =
2391                                 qla_host_to_le16(hw->intr_id[i]);
2392                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2393                 }
2394         }
2395
2396         for (i = 0; i <  rcntxt_rds_rings; i++) {
2397                 rcntxt->rds[i].paddr_std =
2398                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2399
2400                 if (ha->hw.enable_9kb)
2401                         rcntxt->rds[i].std_bsize =
2402                                 qla_host_to_le64(MJUM9BYTES);
2403                 else
2404                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2405
2406                 rcntxt->rds[i].std_nentries =
2407                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2408         }
2409
2410         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2411                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2412                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2413                 device_printf(dev, "%s: failed0\n", __func__);
2414                 return (-1);
2415         }
2416
2417         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2418
2419         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2420
2421         if (err) {
2422                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2423                 return (-1);
2424         }
2425
2426         for (i = 0; i <  rcntxt_sds_rings; i++) {
2427                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2428         }
2429
2430         for (i = 0; i <  rcntxt_rds_rings; i++) {
2431                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2432         }
2433
2434         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2435
2436         ha->hw.flags.init_rx_cnxt = 1;
2437
2438         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2439
2440                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2441
2442                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2443                                 max_idx = MAX_RCNTXT_SDS_RINGS;
2444                         else
2445                                 max_idx = hw->num_sds_rings - i;
2446
2447                         err = qla_add_rcv_rings(ha, i, max_idx);
2448                         if (err)
2449                                 return -1;
2450
2451                         i += max_idx;
2452                 }
2453         }
2454
2455         if (hw->num_rds_rings > 1) {
2456
2457                 for (i = 0; i < hw->num_rds_rings; ) {
2458
2459                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2460                                 max_idx = MAX_SDS_TO_RDS_MAP;
2461                         else
2462                                 max_idx = hw->num_rds_rings - i;
2463
2464                         err = qla_map_sds_to_rds(ha, i, max_idx);
2465                         if (err)
2466                                 return -1;
2467
2468                         i += max_idx;
2469                 }
2470         }
2471
2472         return (0);
2473 }
2474
2475 static int
2476 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2477 {
2478         device_t                dev = ha->pci_dev;
2479         q80_rq_add_rcv_rings_t  *add_rcv;
2480         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2481         uint32_t                i,j, err;
2482         qla_hw_t                *hw = &ha->hw;
2483
2484         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2485         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2486
2487         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2488         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2489         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2490
2491         add_rcv->nrds_sets_rings = nsds | (1 << 5);
2492         add_rcv->nsds_rings = nsds;
2493         add_rcv->cntxt_id = hw->rcv_cntxt_id;
2494
2495         for (i = 0; i <  nsds; i++) {
2496
2497                 j = i + sds_idx;
2498
2499                 add_rcv->sds[i].paddr =
2500                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2501
2502                 add_rcv->sds[i].size =
2503                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2504
2505                 if (ha->msix_count == 2) {
2506                         add_rcv->sds[i].intr_id =
2507                                 qla_host_to_le16(hw->intr_id[0]);
2508                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2509                 } else {
2510                         add_rcv->sds[i].intr_id =
2511                                 qla_host_to_le16(hw->intr_id[j]);
2512                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2513                 }
2514
2515         }
2516         for (i = 0; (i <  nsds); i++) {
2517                 j = i + sds_idx;
2518
2519                 add_rcv->rds[i].paddr_std =
2520                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2521
2522                 if (ha->hw.enable_9kb)
2523                         add_rcv->rds[i].std_bsize =
2524                                 qla_host_to_le64(MJUM9BYTES);
2525                 else
2526                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2527
2528                 add_rcv->rds[i].std_nentries =
2529                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2530         }
2531
2532
2533         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2534                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2535                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2536                 device_printf(dev, "%s: failed0\n", __func__);
2537                 return (-1);
2538         }
2539
2540         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2541
2542         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2543
2544         if (err) {
2545                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2546                 return (-1);
2547         }
2548
2549         for (i = 0; i < nsds; i++) {
2550                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2551         }
2552
2553         for (i = 0; i < nsds; i++) {
2554                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2555         }
2556
2557         return (0);
2558 }
2559
2560 /*
2561  * Name: qla_del_rcv_cntxt
2562  * Function: Destroys the Receive Context.
2563  */
2564 static void
2565 qla_del_rcv_cntxt(qla_host_t *ha)
2566 {
2567         device_t                        dev = ha->pci_dev;
2568         q80_rcv_cntxt_destroy_t         *rcntxt;
2569         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
2570         uint32_t                        err;
2571         uint8_t                         bcast_mac[6];
2572
2573         if (!ha->hw.flags.init_rx_cnxt)
2574                 return;
2575
2576         if (qla_hw_del_all_mcast(ha))
2577                 return;
2578
2579         if (ha->hw.flags.bcast_mac) {
2580
2581                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2582                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2583
2584                 if (qla_config_mac_addr(ha, bcast_mac, 0))
2585                         return;
2586                 ha->hw.flags.bcast_mac = 0;
2587
2588         }
2589
2590         if (ha->hw.flags.unicast_mac) {
2591                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2592                         return;
2593                 ha->hw.flags.unicast_mac = 0;
2594         }
2595
2596         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2597         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2598
2599         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2600         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2601         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2602
2603         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2604
2605         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2606                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2607                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2608                 device_printf(dev, "%s: failed0\n", __func__);
2609                 return;
2610         }
2611         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2612
2613         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2614
2615         if (err) {
2616                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2617         }
2618
2619         ha->hw.flags.init_rx_cnxt = 0;
2620         return;
2621 }
2622
2623 /*
2624  * Name: qla_init_xmt_cntxt
2625  * Function: Creates the Transmit Context.
2626  */
2627 static int
2628 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2629 {
2630         device_t                dev;
2631         qla_hw_t                *hw = &ha->hw;
2632         q80_rq_tx_cntxt_t       *tcntxt;
2633         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
2634         uint32_t                err;
2635         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2636
2637         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2638
2639         dev = ha->pci_dev;
2640
2641         /*
2642          * Create Transmit Context
2643          */
2644         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2645         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2646
2647         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2648         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2649         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2650
2651 #ifdef QL_ENABLE_ISCSI_TLV
2652
2653         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2654                                 Q8_TX_CNTXT_CAP0_TC;
2655
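        /*
         * When iSCSI TLV support is compiled in, the upper half of the tx
         * rings is placed on traffic class 1; the driver appears to steer
         * iSCSI PDUs to those rings (see the iscsi_pdu path in ql_hw_send()).
         */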
2656         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2657                 tcntxt->traffic_class = 1;
2658         }
2659
2660 #else
2661
2662         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2663
2664 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2665
2666         tcntxt->ntx_rings = 1;
2667
2668         tcntxt->tx_ring[0].paddr =
2669                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2670         tcntxt->tx_ring[0].tx_consumer =
2671                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2672         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2673
2674         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2675         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2676
2677
2678         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2679         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2680
2681         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2682                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2683                 ha->hw.mbox,
2684                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2685                 device_printf(dev, "%s: failed0\n", __func__);
2686                 return (-1);
2687         }
2688         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2689
2690         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2691
2692         if (err) {
2693                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2694                 return -1;
2695         }
2696
2697         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2698         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2699
2700         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2701                 return (-1);
2702
2703         return (0);
2704 }
2705
2706
2707 /*
2708  * Name: qla_del_xmt_cntxt
2709  * Function: Destroys the Transmit Context.
2710  */
2711 static int
2712 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2713 {
2714         device_t                        dev = ha->pci_dev;
2715         q80_tx_cntxt_destroy_t          *tcntxt;
2716         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
2717         uint32_t                        err;
2718
2719         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2720         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2721
2722         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2723         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2724         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2725
2726         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2727
2728         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2729                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2730                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2731                 device_printf(dev, "%s: failed0\n", __func__);
2732                 return (-1);
2733         }
2734         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2735
2736         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2737
2738         if (err) {
2739                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2740                 return (-1);
2741         }
2742
2743         return (0);
2744 }
2745 static void
2746 qla_del_xmt_cntxt(qla_host_t *ha)
2747 {
2748         uint32_t i;
2749
2750         if (!ha->hw.flags.init_tx_cnxt)
2751                 return;
2752
2753         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2754                 if (qla_del_xmt_cntxt_i(ha, i))
2755                         break;
2756         }
2757         ha->hw.flags.init_tx_cnxt = 0;
2758 }
2759
2760 static int
2761 qla_init_xmt_cntxt(qla_host_t *ha)
2762 {
2763         uint32_t i, j;
2764
2765         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2766                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2767                         for (j = 0; j < i; j++)
2768                                 qla_del_xmt_cntxt_i(ha, j);
2769                         return (-1);
2770                 }
2771         }
2772         ha->hw.flags.init_tx_cnxt = 1;
2773         return (0);
2774 }
2775
2776 static int
2777 qla_hw_add_all_mcast(qla_host_t *ha)
2778 {
2779         int i, nmcast;
2780
2781         nmcast = ha->hw.nmcast;
2782
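        /* an all-zero address marks an unused slot in the cached table */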
2783         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2784                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2785                         (ha->hw.mcast[i].addr[1] != 0) ||
2786                         (ha->hw.mcast[i].addr[2] != 0) ||
2787                         (ha->hw.mcast[i].addr[3] != 0) ||
2788                         (ha->hw.mcast[i].addr[4] != 0) ||
2789                         (ha->hw.mcast[i].addr[5] != 0)) {
2790
2791                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2792                                 device_printf(ha->pci_dev, "%s: failed\n",
2793                                         __func__);
2794                                 return (-1);
2795                         }
2796
2797                         nmcast--;
2798                 }
2799         }
2800         return 0;
2801 }
2802
2803 static int
2804 qla_hw_del_all_mcast(qla_host_t *ha)
2805 {
2806         int i, nmcast;
2807
2808         nmcast = ha->hw.nmcast;
2809
2810         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2811                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2812                         (ha->hw.mcast[i].addr[1] != 0) ||
2813                         (ha->hw.mcast[i].addr[2] != 0) ||
2814                         (ha->hw.mcast[i].addr[3] != 0) ||
2815                         (ha->hw.mcast[i].addr[4] != 0) ||
2816                         (ha->hw.mcast[i].addr[5] != 0)) {
2817
2818                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2819                                 return (-1);
2820
2821                         nmcast--;
2822                 }
2823         }
2824         return 0;
2825 }
2826
2827 static int
2828 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2829 {
2830         int i;
2831
2832         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2833
2834                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2835                         return 0; /* it has already been added */
2836         }
2837
2838         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2839
2840                 if ((ha->hw.mcast[i].addr[0] == 0) && 
2841                         (ha->hw.mcast[i].addr[1] == 0) &&
2842                         (ha->hw.mcast[i].addr[2] == 0) &&
2843                         (ha->hw.mcast[i].addr[3] == 0) &&
2844                         (ha->hw.mcast[i].addr[4] == 0) &&
2845                         (ha->hw.mcast[i].addr[5] == 0)) {
2846
2847                         if (qla_config_mac_addr(ha, mta, 1))
2848                                 return (-1);
2849
2850                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2851                         ha->hw.nmcast++;        
2852
2853                         return 0;
2854                 }
2855         }
2856         return 0;
2857 }
2858
2859 static int
2860 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2861 {
2862         int i;
2863
2864         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2865                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2866
2867                         if (qla_config_mac_addr(ha, mta, 0))
2868                                 return (-1);
2869
2870                         ha->hw.mcast[i].addr[0] = 0;
2871                         ha->hw.mcast[i].addr[1] = 0;
2872                         ha->hw.mcast[i].addr[2] = 0;
2873                         ha->hw.mcast[i].addr[3] = 0;
2874                         ha->hw.mcast[i].addr[4] = 0;
2875                         ha->hw.mcast[i].addr[5] = 0;
2876
2877                         ha->hw.nmcast--;        
2878
2879                         return 0;
2880                 }
2881         }
2882         return 0;
2883 }
2884
2885 /*
2886  * Name: ql_hw_set_multi
2887  * Function: Sets the Multicast Addresses provided by the host O.S. into the
2888  *      hardware (for the given interface)
2889  */
2890 int
2891 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2892         uint32_t add_mac)
2893 {
2894         int i;
2895         uint8_t *mta = mcast;
2896         int ret = 0;
2897
2898         for (i = 0; i < mcnt; i++) {
2899                 if (add_mac) {
2900                         ret = qla_hw_add_mcast(ha, mta);
2901                         if (ret)
2902                                 break;
2903                 } else {
2904                         ret = qla_hw_del_mcast(ha, mta);
2905                         if (ret)
2906                                 break;
2907                 }
2908                         
2909                 mta += Q8_MAC_ADDR_LEN;
2910         }
2911         return (ret);
2912 }
2913
2914 /*
2915  * Name: qla_hw_tx_done_locked
2916  * Function: Handle Transmit Completions
2917  */
2918 static void
2919 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2920 {
2921         qla_tx_buf_t *txb;
2922         qla_hw_t *hw = &ha->hw;
2923         uint32_t comp_idx, comp_count = 0;
2924         qla_hw_tx_cntxt_t *hw_tx_cntxt;
2925
2926         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2927
2928         /* retrieve the index of the last completed entry in the tx ring */
2929         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2930
2931         while (comp_idx != hw_tx_cntxt->txr_comp) {
2932
2933                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2934
2935                 hw_tx_cntxt->txr_comp++;
2936                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2937                         hw_tx_cntxt->txr_comp = 0;
2938
2939                 comp_count++;
2940
2941                 if (txb->m_head) {
2942                         ha->ifp->if_opackets++;
2943
2944                         bus_dmamap_sync(ha->tx_tag, txb->map,
2945                                 BUS_DMASYNC_POSTWRITE);
2946                         bus_dmamap_unload(ha->tx_tag, txb->map);
2947                         m_freem(txb->m_head);
2948
2949                         txb->m_head = NULL;
2950                 }
2951         }
2952
2953         hw_tx_cntxt->txr_free += comp_count;
2954         return;
2955 }
2956
2957 /*
2958  * Name: ql_hw_tx_done
2959  * Function: Handle Transmit Completions
2960  */
2961 void
2962 ql_hw_tx_done(qla_host_t *ha)
2963 {
2964         int i;
2965         uint32_t flag = 0;
2966
2967         if (!mtx_trylock(&ha->tx_lock)) {
2968                 QL_DPRINT8(ha, (ha->pci_dev,
2969                         "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2970                 return;
2971         }
2972         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2973                 qla_hw_tx_done_locked(ha, i);
2974                 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2975                         flag = 1;
2976         }
2977
2978         if (!flag)
2979                 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2980
2981         QLA_TX_UNLOCK(ha);
2982         return;
2983 }
2984
2985 void
2986 ql_update_link_state(qla_host_t *ha)
2987 {
2988         uint32_t link_state;
2989         uint32_t prev_link_state;
2990
2991         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2992                 ha->hw.link_up = 0;
2993                 return;
2994         }
2995         link_state = READ_REG32(ha, Q8_LINK_STATE);
2996
2997         prev_link_state = ha->hw.link_up;
2998
2999         if (ha->pci_func == 0)
3000                 ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
3001         else
3002                 ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3003
3004         if (prev_link_state != ha->hw.link_up) {
3005                 if (ha->hw.link_up) {
3006                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3007                 } else {
3008                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3009                 }
3010         }
3011         return;
3012 }
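
/*
 * Q8_LINK_STATE packs one 4-bit link-state field per PCI function:
 * bits 3:0 for function 0 and bits 7:4 for function 1, with the value
 * 1 meaning link up.  The two branches above are the pci_func == 0/1
 * cases of the general form (assuming the per-function nibble layout
 * continues past function 1):
 *
 *      link_up = (((link_state >> (4 * pci_func)) & 0xF) == 1);
 */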
3013
3014 void
3015 ql_hw_stop_rcv(qla_host_t *ha)
3016 {
3017         int i, done, count = 100;
3018
3019         while (count) {
3020                 done = 1;
3021                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3022                         if (ha->hw.sds[i].rcv_active)
3023                                 done = 0;
3024                 }
3025                 if (done)
3026                         break;
3027                 else
3028                         qla_mdelay(__func__, 10);
3029                 count--;
3030         }
3031         if (!count)
3032                 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3033
3034         return;
3035 }
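
/*
 * The poll above waits at most 100 * 10ms (about one second) for all
 * SDS rings to go idle before giving up with "Counter expired".
 */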
3036
3037 int
3038 ql_hw_check_health(qla_host_t *ha)
3039 {
3040         uint32_t val;
3041
3042         ha->hw.health_count++;
3043
3044         if (ha->hw.health_count < 1000)
3045                 return 0;
3046
3047         ha->hw.health_count = 0;
3048
3049         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3050
3051         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3052                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3053                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3054                         __func__, val);
3055                 return -1;
3056         }
3057
3058         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3059
3060         if ((val != ha->hw.hbeat_value) &&
3061                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3062                 ha->hw.hbeat_value = val;
3063                 return 0;
3064         }
3065         device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3066                 __func__, val);
3067
3068         return -1;
3069 }
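
/*
 * The health check is rate limited: the temperature and heartbeat
 * registers are read only on every 1000th call, so the effective poll
 * interval is 1000x the cadence of whatever timer invokes this routine.
 * A heartbeat value unchanged between two such reads is treated as a
 * firmware hang and reported with -1.
 */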
3070
3071 static int
3072 qla_init_nic_func(qla_host_t *ha)
3073 {
3074         device_t                dev;
3075         q80_init_nic_func_t     *init_nic;
3076         q80_init_nic_func_rsp_t *init_nic_rsp;
3077         uint32_t                err;
3078
3079         dev = ha->pci_dev;
3080
3081         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3082         bzero(init_nic, sizeof(q80_init_nic_func_t));
3083
3084         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3085         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3086         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3087
3088         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3089         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3090         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3091
3092 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3093         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3094                 (sizeof (q80_init_nic_func_t) >> 2),
3095                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3096                 device_printf(dev, "%s: failed\n", __func__);
3097                 return -1;
3098         }
3099
3100         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3101 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3102
3103         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3104
3105         if (err) {
3106                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3107         }
3108
3109         return 0;
3110 }
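
/*
 * qla_init_nic_func() above follows the mailbox pattern used by most
 * commands in this file: overlay a q80_* request on ha->hw.mbox, fill
 * opcode and count_version (payload length in 32-bit words OR'ed with
 * Q8_MBX_CMD_VERSION), issue it via qla_mbx_cmd() and check the result
 * with Q8_MBX_RSP_STATUS().  The sketch below restates that pattern
 * generically; it is illustrative only, and the q80_hdr_t layout is a
 * hypothetical stand-in for the real q80_* structures.
 */
#if 0
typedef struct {
        uint16_t opcode;
        uint16_t count_version;
        uint32_t regcnt_status;
} q80_hdr_t;

static int
qla_simple_mbx(qla_host_t *ha, uint16_t opcode, uint32_t req_len,
        uint32_t rsp_len)
{
        q80_hdr_t       *req = (q80_hdr_t *)ha->hw.mbox;
        uint32_t        err;

        /* build the request in the shared mailbox buffer */
        bzero(req, req_len);
        req->opcode = opcode;
        req->count_version = (req_len >> 2) | Q8_MBX_CMD_VERSION;

        /* lengths are passed to the firmware in 32-bit words */
        if (qla_mbx_cmd(ha, (uint32_t *)req, (req_len >> 2),
                ha->hw.mbox, (rsp_len >> 2), 0))
                return (-1);

        /* the response overwrites the same buffer */
        err = Q8_MBX_RSP_STATUS(((q80_hdr_t *)ha->hw.mbox)->regcnt_status);
        return (err ? -1 : 0);
}
#endif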
3111
3112 static int
3113 qla_stop_nic_func(qla_host_t *ha)
3114 {
3115         device_t                dev;
3116         q80_stop_nic_func_t     *stop_nic;
3117         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3118         uint32_t                err;
3119
3120         dev = ha->pci_dev;
3121
3122         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3123         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3124
3125         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3126         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3127         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3128
3129         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3130         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3131
3132 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3133         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3134                 (sizeof (q80_stop_nic_func_t) >> 2),
3135                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3136                 device_printf(dev, "%s: failed\n", __func__);
3137                 return -1;
3138         }
3139
3140         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3141 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3142
3143         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3144
3145         if (err) {
3146                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3147         }
3148
3149         return 0;
3150 }
3151
3152 static int
3153 qla_query_fw_dcbx_caps(qla_host_t *ha)
3154 {
3155         device_t                        dev;
3156         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3157         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3158         uint32_t                        err;
3159
3160         dev = ha->pci_dev;
3161
3162         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3163         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3164
3165         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3166         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3167         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3168
3169         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3170         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3171                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3172                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3173                 device_printf(dev, "%s: failed\n", __func__);
3174                 return -1;
3175         }
3176
3177         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3178         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3179                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3180
3181         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3182
3183         if (err) {
3184                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3185         }
3186
3187         return 0;
3188 }
3189
3190 static int
3191 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3192         uint32_t aen_mb3, uint32_t aen_mb4)
3193 {
3194         device_t                dev;
3195         q80_idc_ack_t           *idc_ack;
3196         q80_idc_ack_rsp_t       *idc_ack_rsp;
3197         uint32_t                err;
3198         int                     count = 300;
3199
3200         dev = ha->pci_dev;
3201
3202         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3203         bzero(idc_ack, sizeof(q80_idc_ack_t));
3204
3205         idc_ack->opcode = Q8_MBX_IDC_ACK;
3206         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3207         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3208
3209         idc_ack->aen_mb1 = aen_mb1;
3210         idc_ack->aen_mb2 = aen_mb2;
3211         idc_ack->aen_mb3 = aen_mb3;
3212         idc_ack->aen_mb4 = aen_mb4;
3213
3214         ha->hw.imd_compl = 0;
3215
3216         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3217                 (sizeof (q80_idc_ack_t) >> 2),
3218                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3219                 device_printf(dev, "%s: failed\n", __func__);
3220                 return -1;
3221         }
3222
3223         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3224
3225         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3226
3227         if (err) {
3228                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3229                 return(-1);
3230         }
3231
3232         while (count && !ha->hw.imd_compl) {
3233                 qla_mdelay(__func__, 100);
3234                 count--;
3235         }
3236
3237         if (!count)
3238                 return -1;
3239         else
3240                 device_printf(dev, "%s: count %d\n", __func__, count);
3241
3242         return (0);
3243 }
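
/*
 * After the mailbox command itself succeeds, the loop above polls for
 * the asynchronous IDC completion (hw.imd_compl, set from the AEN
 * handling path) for up to 300 * 100ms = 30 seconds before reporting
 * failure.
 */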
3244
3245 static int
3246 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3247 {
3248         device_t                dev;
3249         q80_set_port_cfg_t      *pcfg;
3250         q80_set_port_cfg_rsp_t  *pcfg_rsp;
3251         uint32_t                err;
3252         int                     count = 300;
3253
3254         dev = ha->pci_dev;
3255
3256         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3257         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3258
3259         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3260         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3261         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3262
3263         pcfg->cfg_bits = cfg_bits;
3264
3265         device_printf(dev, "%s: cfg_bits"
3266                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3267                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3268                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3269                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3270                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3271
3272         ha->hw.imd_compl = 0;
3273
3274         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3275                 (sizeof (q80_set_port_cfg_t) >> 2),
3276                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3277                 device_printf(dev, "%s: failed\n", __func__);
3278                 return -1;
3279         }
3280
3281         pcfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3282
3283         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3284
3285         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3286                 while (count && !ha->hw.imd_compl) {
3287                         qla_mdelay(__func__, 100);
3288                         count--;
3289                 }
3290                 if (count) {
3291                         device_printf(dev, "%s: count %d\n", __func__, count);
3292
3293                         err = 0;
3294                 }
3295         }
3296
3297         if (err) {
3298                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3299                 return(-1);
3300         }
3301
3302         return (0);
3303 }
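
/*
 * From the shifts used in the printf above, cfg_bits carries at least
 * the standard-pause-direction field starting at bit 20
 * (Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK), the pause-type field starting
 * at bit 5 (Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) and a DCBX enable flag
 * (Q8_PORT_CFG_BITS_DCBX_ENABLE); the exact field widths are given by
 * the mask definitions in ql_hw.h.
 */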
3304
3305
3306 static int
3307 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3308 {
3309         uint32_t                        err;
3310         device_t                        dev = ha->pci_dev;
3311         q80_config_md_templ_size_t      *md_size;
3312         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3313
3314 #ifdef QL_LDFLASH_FW
3315
3316         *size = ql83xx_minidump_len;
3317         return (0);
3318
3319 #endif /* #ifdef QL_LDFLASH_FW */
3320
3321         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3322         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3323
3324         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3325         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3326         md_size->count_version |= Q8_MBX_CMD_VERSION;
3327
3328         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3329                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3330                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3331
3332                 device_printf(dev, "%s: failed\n", __func__);
3333
3334                 return (-1);
3335         }
3336
3337         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3338
3339         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3340
3341         if (err) {
3342                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3343                 return(-1);
3344         }
3345
3346         *size = md_size_rsp->templ_size;
3347
3348         return (0);
3349 }
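
/*
 * Note the QL_LDFLASH_FW split above: flash-resident firmware builds
 * report the compiled-in ql83xx_minidump_len via the early return and
 * never reach the mailbox query; otherwise the template size is asked
 * of the firmware with Q8_MBX_GET_MINIDUMP_TMPLT_SIZE.
 */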
3350
3351 static int
3352 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3353 {
3354         device_t                dev;
3355         q80_get_port_cfg_t      *pcfg;
3356         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3357         uint32_t                err;
3358
3359         dev = ha->pci_dev;
3360
3361         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3362         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3363
3364         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3365         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3366         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3367
3368         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3369                 (sizeof (q80_get_port_cfg_t) >> 2),
3370                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3371                 device_printf(dev, "%s: failed\n", __func__);
3372                 return -1;
3373         }
3374
3375         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3376
3377         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3378
3379         if (err) {
3380                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3381                 return(-1);
3382         }
3383
3384         device_printf(dev, "%s: [cfg_bits, port type]"
3385                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3386                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3387                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3388                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3389                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3390                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3391                 );
3392
3393         *cfg_bits = pcfg_rsp->cfg_bits;
3394
3395         return (0);
3396 }
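
/*
 * qla_get_port_config() and qla_set_port_config() can be used as a
 * read-modify-write pair.  A hypothetical sketch (the DCBX toggle is
 * only an example):
 */
#if 0
        uint32_t        cfg_bits;

        if (qla_get_port_config(ha, &cfg_bits) == 0) {
                cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                (void)qla_set_port_config(ha, cfg_bits);
        }
#endif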
3397
3398 int
3399 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3400 {
3401         struct ether_vlan_header        *eh;
3402         uint16_t                        etype;
3403         struct ip                       *ip = NULL;
3404         struct ip6_hdr                  *ip6 = NULL;
3405         struct tcphdr                   *th = NULL;
3406         uint32_t                        hdrlen;
3407         uint32_t                        offset;
3408         uint8_t                         buf[sizeof(struct ip6_hdr)];
3409
3410         eh = mtod(mp, struct ether_vlan_header *);
3411
3412         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3413                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3414                 etype = ntohs(eh->evl_proto);
3415         } else {
3416                 hdrlen = ETHER_HDR_LEN;
3417                 etype = ntohs(eh->evl_encap_proto);
3418         }
3419
3420         if (etype == ETHERTYPE_IP) {
3421
3422                 offset = (hdrlen + sizeof (struct ip));
3423
3424                 if (mp->m_len >= offset) {
3425                         ip = (struct ip *)(mp->m_data + hdrlen);
3426                 } else {
3427                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3428                         ip = (struct ip *)buf;
3429                 }
3430
3431                 if (ip->ip_p == IPPROTO_TCP) {
3432
3433                         hdrlen += ip->ip_hl << 2;
3434                         offset = hdrlen + 4;
3435         
3436                         if (mp->m_len >= offset) {
3437                                 th = (struct tcphdr *)(mp->m_data + hdrlen);;
3438                         } else {
3439                                 m_copydata(mp, hdrlen, 4, buf);
3440                                 th = (struct tcphdr *)buf;
3441                         }
3442                 }
3443
3444         } else if (etype == ETHERTYPE_IPV6) {
3445
3446                 offset = (hdrlen + sizeof (struct ip6_hdr));
3447
3448                 if (mp->m_len >= offset) {
3449                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3450                 } else {
3451                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3452                         ip6 = (struct ip6_hdr *)buf;
3453                 }
3454
3455                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3456
3457                         hdrlen += sizeof(struct ip6_hdr);
3458                         offset = hdrlen + 4;
3459
3460                         if (mp->m_len >= offset) {
3461                                 th = (struct tcphdr *)(mp->m_data + hdrlen);;
3462                         } else {
3463                                 m_copydata(mp, hdrlen, 4, buf);
3464                                 th = (struct tcphdr *)buf;
3465                         }
3466                 }
3467         }
3468
3469         if (th != NULL) {
3470                 if ((th->th_sport == htons(3260)) ||
3471                         (th->th_dport == htons(3260)))
3472                         return 0;
3473         }
3474         return (-1);
3475 }
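
/*
 * Return convention: 0 means the frame looks like an iSCSI PDU (TCP
 * source or destination port 3260, the IANA-assigned iSCSI port);
 * -1 means it does not.  Only the first four bytes of the TCP header
 * (the two port numbers) are ever examined, which is why the
 * m_copydata() fallbacks above copy exactly four bytes.
 */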
3476
3477 void
3478 qla_hw_async_event(qla_host_t *ha)
3479 {
3480         switch (ha->hw.aen_mb0) {
3481         case 0x8101:
3482                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3483                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3484
3485                 break;
3486
3487         default:
3488                 break;
3489         }
3490
3491         return;
3492 }
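
/*
 * Only AEN mailbox-0 value 0x8101 is handled here; it is acknowledged
 * through qla_idc_ack() with the remaining AEN mailboxes echoed back.
 * 0x8101 appears to be the inter-driver-communication (IDC) request
 * event for this firmware family; all other AENs are ignored.
 */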
3493
3494 #ifdef QL_LDFLASH_FW
3495 static int
3496 qla_get_minidump_template(qla_host_t *ha)
3497 {
3498         uint32_t                        err;
3499         device_t                        dev = ha->pci_dev;
3500         q80_config_md_templ_cmd_t       *md_templ;
3501         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
3502
3503         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3504         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3505
3506         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3507         md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
3508         md_templ->count_version |= Q8_MBX_CMD_VERSION;
3509
3510         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3511         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3512
3513         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3514                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3515                  ha->hw.mbox,
3516                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3517
3518                 device_printf(dev, "%s: failed\n", __func__);
3519
3520                 return (-1);
3521         }
3522
3523         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3524
3525         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3526
3527         if (err) {
3528                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3529                 return (-1);
3530         }
3531
3532         return (0);
3533
3534 }
3535 #endif /* #ifdef QL_LDFLASH_FW */
3536
3537 static int
3538 qla_minidump_init(qla_host_t *ha)
3539 {
3540         int             ret = 0;
3541         uint32_t        template_size = 0;
3542         device_t        dev = ha->pci_dev;
3543
3544         /*
3545          * Get Minidump Template Size
3546          */
3547         ret = qla_get_minidump_tmplt_size(ha, &template_size);
3548
3549         if (ret || (template_size == 0)) {
3550                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3551                         template_size);
3552                 return (-1);
3553         }
3554
3555         /*
3556          * Allocate Memory for Minidump Template
3557          */
3558
3559         ha->hw.dma_buf.minidump.alignment = 8;
3560         ha->hw.dma_buf.minidump.size = template_size;
3561
3562 #ifdef QL_LDFLASH_FW
3563         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3564
3565                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3566
3567                 return (-1);
3568         }
3569         ha->hw.dma_buf.flags.minidump = 1;
3570
3571         /*
3572          * Retrieve Minidump Template
3573          */
3574         ret = qla_get_minidump_template(ha);
3575 #else
3576         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3577 #endif /* #ifdef QL_LDFLASH_FW */
3578
3579         if (ret) {
3580                 qla_minidump_free(ha);
3581         } else {
3582                 ha->hw.mdump_init = 1;
3583         }
3584
3585         return (ret);
3586 }
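
/*
 * On the QL_LDFLASH_FW path a failed template fetch releases the DMA
 * buffer again through qla_minidump_free(); on the non-flash path ret
 * is still 0 from the size query, so mdump_init is simply set and the
 * compiled-in ql83xx_minidump image is used in place.
 */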
3587
3588
3589 static void
3590 qla_minidump_free(qla_host_t *ha)
3591 {
3592         ha->hw.mdump_init = 0;
3593         if (ha->hw.dma_buf.flags.minidump) {
3594                 ha->hw.dma_buf.flags.minidump = 0;
3595                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3596         }
3597         return;
3598 }
3599
3600 void
3601 ql_minidump(qla_host_t *ha)
3602 {
3603         uint32_t delay = 6000;
3604
3605         if (!ha->hw.mdump_init)
3606                 return;
3607
3608         if (!ha->hw.mdump_active)
3609                 return;
3610
3611         if (ha->hw.mdump_active == 1) {
3612                 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3613                 ha->hw.mdump_start = 1;
3614         }
3615
3616         while (delay-- && ha->hw.mdump_active) {
3617                 qla_mdelay(__func__, 100);
3618         }
3619         ha->hw.mdump_start = 0;
3620         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3621
3622         return;
3623 }
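
/*
 * The capture wait above is bounded: 6000 iterations * 100ms caps the
 * wait at roughly ten minutes for the firmware to clear mdump_active
 * before the stopped sequence is restarted unconditionally.
 */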