/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_rxtx.h"
#include "lio_network.h"

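/*
 * Build a single control packet for the given command/parameter pair and
 * hand it to the firmware on the interface's first tx queue; completion is
 * reported asynchronously through lio_ctrl_cmd_completion().
 */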
int
lio_set_feature(if_t ifp, int cmd, uint16_t param1)
{
        struct lio_ctrl_pkt     nctrl;
        struct lio              *lio = if_getsoftc(ifp);
        struct octeon_device    *oct = lio->oct_dev;
        int     ret = 0;

        bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

        nctrl.ncmd.cmd64 = 0;
        nctrl.ncmd.s.cmd = cmd;
        nctrl.ncmd.s.param1 = param1;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.wait_time = 100;
        nctrl.lio = lio;
        nctrl.cb_fn = lio_ctrl_cmd_completion;

        ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
                lio_dev_err(oct, "Feature change failed in core (ret: 0x%x)\n",
                            ret);
        }

        return (ret);
}

void
lio_ctrl_cmd_completion(void *nctrl_ptr)
{
        struct lio_ctrl_pkt     *nctrl = (struct lio_ctrl_pkt *)nctrl_ptr;
        struct lio              *lio;
        struct octeon_device    *oct;
        uint8_t *mac;

        lio = nctrl->lio;

        if (lio->oct_dev == NULL)
                return;

        oct = lio->oct_dev;

        switch (nctrl->ncmd.s.cmd) {
        case LIO_CMD_CHANGE_DEVFLAGS:
        case LIO_CMD_SET_MULTI_LIST:
                break;

        case LIO_CMD_CHANGE_MACADDR:
                mac = ((uint8_t *)&nctrl->udd[0]) + 2;
                if (nctrl->ncmd.s.param1) {
                        /* vfidx is 0 based, but vf_num (param1) is 1 based */
                        int     vfidx = nctrl->ncmd.s.param1 - 1;
                        bool    mac_is_admin_assigned = nctrl->ncmd.s.param2;

                        if (mac_is_admin_assigned)
                                lio_dev_info(oct, "MAC Address %02x:%02x:%02x:%02x:%02x:%02x is configured for VF %d\n",
                                             mac[0], mac[1], mac[2], mac[3],
                                             mac[4], mac[5], vfidx);
                } else {
                        lio_dev_info(oct, "MAC Address changed to %02x:%02x:%02x:%02x:%02x:%02x\n",
                                     mac[0], mac[1], mac[2], mac[3], mac[4],
                                     mac[5]);
                }
                break;

        case LIO_CMD_GPIO_ACCESS:
        case LIO_CMD_ID_ACTIVE:
                lio_dev_info(oct, "LED Flashing visual identification\n");
                break;

        case LIO_CMD_LRO_ENABLE:
                lio_dev_info(oct, "HW LRO Enabled\n");
                break;

        case LIO_CMD_LRO_DISABLE:
                lio_dev_info(oct, "HW LRO Disabled\n");
                break;

        case LIO_CMD_VERBOSE_ENABLE:
                lio_dev_info(oct, "Firmware debug enabled\n");
                break;

        case LIO_CMD_VERBOSE_DISABLE:
                lio_dev_info(oct, "Firmware debug disabled\n");
                break;

        case LIO_CMD_VLAN_FILTER_CTL:
                if (nctrl->ncmd.s.param1)
                        lio_dev_info(oct, "VLAN filter enabled\n");
                else
                        lio_dev_info(oct, "VLAN filter disabled\n");
                break;

        case LIO_CMD_ADD_VLAN_FILTER:
                lio_dev_info(oct, "VLAN filter %d added\n",
                             nctrl->ncmd.s.param1);
                break;

        case LIO_CMD_DEL_VLAN_FILTER:
                lio_dev_info(oct, "VLAN filter %d removed\n",
                             nctrl->ncmd.s.param1);
                break;

        case LIO_CMD_SET_SETTINGS:
                lio_dev_info(oct, "Settings changed\n");
                break;

                /*
                 * Case to handle "LIO_CMD_TNL_RX_CSUM_CTL"
                 * Command passed by NIC driver
                 */
        case LIO_CMD_TNL_RX_CSUM_CTL:
                if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_ENABLE) {
                        lio_dev_info(oct, "RX Checksum Offload Enabled\n");
                } else if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_DISABLE) {
                        lio_dev_info(oct, "RX Checksum Offload Disabled\n");
                }
                break;

                /*
                 * Case to handle "LIO_CMD_TNL_TX_CSUM_CTL"
                 * Command passed by NIC driver
                 */
        case LIO_CMD_TNL_TX_CSUM_CTL:
                if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_ENABLE) {
                        lio_dev_info(oct, "TX Checksum Offload Enabled\n");
                } else if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_DISABLE) {
                        lio_dev_info(oct, "TX Checksum Offload Disabled\n");
                }
                break;

                /*
                 * Case to handle "LIO_CMD_VXLAN_PORT_CONFIG"
                 * Command passed by NIC driver
                 */
        case LIO_CMD_VXLAN_PORT_CONFIG:
                if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_ADD) {
                        lio_dev_info(oct, "VxLAN Destination UDP PORT:%d ADDED\n",
                                     nctrl->ncmd.s.param1);
                } else if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_DEL) {
                        lio_dev_info(oct, "VxLAN Destination UDP PORT:%d DELETED\n",
                                     nctrl->ncmd.s.param1);
                }
                break;

        case LIO_CMD_SET_FLOW_CTL:
                lio_dev_info(oct, "Set RX/TX flow control parameters\n");
                break;

        case LIO_CMD_SET_FNV:
                if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_ENABLE)
                        lio_dev_info(oct, "FNV Enabled\n");
                else if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_DISABLE)
                        lio_dev_info(oct, "FNV Disabled\n");
                break;

        case LIO_CMD_PKT_STEERING_CTL:
                if (nctrl->ncmd.s.param1 == LIO_CMD_PKT_STEERING_ENABLE) {
                        lio_dev_info(oct, "Packet Steering Enabled\n");
                } else if (nctrl->ncmd.s.param1 ==
                           LIO_CMD_PKT_STEERING_DISABLE) {
                        lio_dev_info(oct, "Packet Steering Disabled\n");
                }

                break;

        case LIO_CMD_QUEUE_COUNT_CTL:
                lio_dev_info(oct, "Queue count updated to %d\n",
                             nctrl->ncmd.s.param1);
                break;

        default:
                lio_dev_err(oct, "%s Unknown cmd %d\n", __func__,
                            nctrl->ncmd.s.cmd);
        }
}


/*
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int
lio_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
               int desc_size, void *app_ctx)
{
        int     ret_val = 0;

        lio_dev_dbg(oct, "Creating Droq: %d\n", q_no);
        /* droq creation and local register settings. */
        ret_val = lio_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
        if (ret_val < 0)
                return (ret_val);

        if (ret_val == 1) {
                lio_dev_dbg(oct, "Using default droq %d\n", q_no);
                return (0);
        }

        /*
         * Send Credit for Octeon Output queues. Credits are always
         * sent after the output queue is enabled.
         */
        lio_write_csr32(oct, oct->droq[q_no]->pkts_credit_reg,
                        oct->droq[q_no]->max_count);

        return (ret_val);
}

static void
lio_push_packet(void *m_buff, uint32_t len, union octeon_rh *rh, void *rxq,
                void *arg)
{
        struct mbuf     *mbuf = m_buff;
        if_t            ifp = arg;
        struct lio_droq *droq = rxq;

        if (ifp != NULL) {
                struct lio      *lio = if_getsoftc(ifp);

                /* Do not proceed if the interface is not in RUNNING state. */
                if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
                        lio_recv_buffer_free(mbuf);
                        droq->stats.rx_dropped++;
                        return;
                }

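                /*
                 * The 32-bit RSS hash value and hash type are carried in
                 * the data header: in the last 8-byte word normally, or in
                 * the word before it when a hardware timestamp is also
                 * present (hence len - 2 vs. len - 1 below).
                 */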
                if (rh->r_dh.has_hash) {
                        uint32_t        hashtype, hashval;

                        if (rh->r_dh.has_hwtstamp) {
                                hashval = htobe32(*(uint32_t *)
                                                  (((uint8_t *)mbuf->m_data) +
                                                   ((rh->r_dh.len - 2) *
                                                    BYTES_PER_DHLEN_UNIT)));
                                hashtype =
                                    htobe32(*(((uint32_t *)
                                               (((uint8_t *)mbuf->m_data) +
                                                ((rh->r_dh.len - 2) *
                                                 BYTES_PER_DHLEN_UNIT))) + 1));
                        } else {
                                hashval = htobe32(*(uint32_t *)
                                                  (((uint8_t *)mbuf->m_data) +
                                                   ((rh->r_dh.len - 1) *
                                                    BYTES_PER_DHLEN_UNIT)));
                                hashtype =
                                    htobe32(*(((uint32_t *)
                                               (((uint8_t *)mbuf->m_data) +
                                                ((rh->r_dh.len - 1) *
                                                 BYTES_PER_DHLEN_UNIT))) + 1));
                        }

                        mbuf->m_pkthdr.flowid = hashval;

                        switch (hashtype) {
                        case LIO_RSS_HASH_IPV4:
                                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
                                break;
                        case LIO_RSS_HASH_TCP_IPV4:
                                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
                                break;
                        case LIO_RSS_HASH_IPV6:
                                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
                                break;
                        case LIO_RSS_HASH_TCP_IPV6:
                                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
                                break;
                        case LIO_RSS_HASH_IPV6_EX:
                                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6_EX);
                                break;
                        case LIO_RSS_HASH_TCP_IPV6_EX:
                                M_HASHTYPE_SET(mbuf,
                                               M_HASHTYPE_RSS_TCP_IPV6_EX);
                                break;
                        default:
                                M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
                        }

                } else {
                        /*
                         * This case won't hit as FW will always set has_hash
                         * in rh.
                         */
                        M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
                        mbuf->m_pkthdr.flowid = droq->q_no;
                }

                m_adj(mbuf, rh->r_dh.len * 8);
                len -= rh->r_dh.len * 8;
                mbuf->m_flags |= M_PKTHDR;

                if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) &&
                    (rh->r_dh.priority || rh->r_dh.vlan)) {
                        uint16_t        priority = rh->r_dh.priority;
                        uint16_t        vid = rh->r_dh.vlan;
                        uint16_t        vtag;

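                        /*
                         * Rebuild the 802.1Q tag: PCP in bits 15-13, VLAN
                         * ID in bits 11-0.
                         */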
                        vtag = priority << 13 | vid;
                        mbuf->m_pkthdr.ether_vtag = vtag;
                        mbuf->m_flags |= M_VLANTAG;
                }

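                /*
                 * Translate the NIC's checksum-verified bits into mbuf
                 * csum_flags so the stack can skip software verification.
                 */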
                if (rh->r_dh.csum_verified & LIO_IPSUM_VERIFIED)
                        mbuf->m_pkthdr.csum_flags |= (CSUM_L3_CALC |
                                                      CSUM_L3_VALID);

                if (rh->r_dh.csum_verified & LIO_L4SUM_VERIFIED) {
                        mbuf->m_pkthdr.csum_flags |= (CSUM_L4_CALC |
                                                      CSUM_L4_VALID);
                        mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
                                                      CSUM_PSEUDO_HDR);
                        mbuf->m_pkthdr.csum_data = htons(0xffff);
                }

                mbuf->m_pkthdr.rcvif = ifp;
                mbuf->m_pkthdr.len = len;

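                /*
                 * Try software LRO only when hardware LRO is disabled, the
                 * interface has LRO enabled, and both the L3 and L4
                 * checksums were verified above.
                 */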
                if ((lio_hwlro == 0) &&
                    (if_getcapenable(ifp) & IFCAP_LRO) &&
                    (mbuf->m_pkthdr.csum_flags &
                     (CSUM_L3_VALID | CSUM_L4_VALID | CSUM_DATA_VALID |
                      CSUM_PSEUDO_HDR)) == (CSUM_L3_VALID | CSUM_L4_VALID |
                                            CSUM_DATA_VALID |
                                            CSUM_PSEUDO_HDR)) {
                        if (droq->lro.lro_cnt) {
                                if (tcp_lro_rx(&droq->lro, mbuf, 0) == 0) {
                                        droq->stats.rx_bytes_received += len;
                                        droq->stats.rx_pkts_received++;
                                        return;
                                }
                        }
                }

                if_input(ifp, mbuf);

                droq->stats.rx_bytes_received += len;
                droq->stats.rx_pkts_received++;

        } else {
                lio_recv_buffer_free(mbuf);
                droq->stats.rx_dropped++;
        }
}

/*
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx  Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int
lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                    uint32_t num_iqs, uint32_t num_oqs)
{
        struct lio_droq_ops     droq_ops;
        if_t                    ifp;
        struct lio_droq         *droq;
        struct lio              *lio;
        static int              cpu_id, cpu_id_modulus;
        int     num_tx_descs, q, q_no, retval = 0;

        ifp = octeon_dev->props.ifp;

        lio = if_getsoftc(ifp);

        bzero(&droq_ops, sizeof(struct lio_droq_ops));

        droq_ops.fptr = lio_push_packet;
        droq_ops.farg = (void *)ifp;

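        /* Spread the DROQs across the available CPUs in round-robin order. */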
        cpu_id = 0;
        cpu_id_modulus = mp_ncpus;
        /* set up DROQs. */
        for (q = 0; q < num_oqs; q++) {
                q_no = lio->linfo.rxpciq[q].s.q_no;
                lio_dev_dbg(octeon_dev, "lio_setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
                            q, q_no);
                retval = lio_setup_droq(octeon_dev, q_no,
                                        LIO_GET_NUM_RX_DESCS_NIC_IF_CFG(
                                                     lio_get_conf(octeon_dev),
                                                                  lio->ifidx),
                                        LIO_GET_NUM_RX_BUF_SIZE_NIC_IF_CFG(
                                                     lio_get_conf(octeon_dev),
                                                           lio->ifidx), NULL);
                if (retval) {
                        lio_dev_err(octeon_dev, "%s : Runtime DROQ(RxQ) creation failed.\n",
                                    __func__);
                        return (1);
                }

                droq = octeon_dev->droq[q_no];

                /* designate a CPU for this droq */
                droq->cpu_id = cpu_id;
                cpu_id++;
                if (cpu_id >= cpu_id_modulus)
                        cpu_id = 0;

                lio_register_droq_ops(octeon_dev, q_no, &droq_ops);
        }

        /* set up IQs. */
        for (q = 0; q < num_iqs; q++) {
                num_tx_descs = LIO_GET_NUM_TX_DESCS_NIC_IF_CFG(
                                                     lio_get_conf(octeon_dev),
                                                               lio->ifidx);
                retval = lio_setup_iq(octeon_dev, ifidx, q,
                                      lio->linfo.txpciq[q], num_tx_descs);
                if (retval) {
                        lio_dev_err(octeon_dev, " %s : Runtime IQ(TxQ) creation failed.\n",
                                    __func__);
                        return (1);
                }
        }

        return (0);
}

/*
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void
lio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
        struct lio_droq *droq;
        uint64_t        oq_no;

        if (oct->int_status & LIO_DEV_INTR_PKT_DATA) {
                for (oq_no = 0; oq_no < LIO_MAX_OUTPUT_QUEUES(oct); oq_no++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(oq_no)))
                                continue;

                        droq = oct->droq[oq_no];

                        taskqueue_enqueue(droq->droq_taskqueue,
                                          &droq->droq_task);
                }
        }
}

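/*
 * Per-queue MSI-X handler: let the device-specific code identify the
 * interrupt cause, process up to rx_budget received packets, flush pending
 * tx instructions, restart a stalled buf_ring transmit if possible, and
 * then either reschedule the droq taskqueue or re-enable the queue
 * interrupt.
 */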
static void
lio_msix_intr_handler(void *vector)
{
        struct lio_ioq_vector   *ioq_vector = (struct lio_ioq_vector *)vector;
        struct octeon_device    *oct = ioq_vector->oct_dev;
        struct lio_droq         *droq = oct->droq[ioq_vector->droq_index];
        uint64_t                ret;

        ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

        if ((ret & LIO_MSIX_PO_INT) || (ret & LIO_MSIX_PI_INT)) {
                struct lio_instr_queue *iq = oct->instr_queue[droq->q_no];
                int     reschedule, tx_done = 1;

                reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget);

                if (atomic_load_acq_int(&iq->instr_pending))
                        tx_done = lio_flush_iq(oct, iq, oct->tx_budget);

                if ((oct->props.ifp != NULL) && (iq->br != NULL)) {
                        if (mtx_trylock(&iq->enq_lock)) {
                                if (!drbr_empty(oct->props.ifp, iq->br))
                                        lio_mq_start_locked(oct->props.ifp,
                                                            iq);
                                mtx_unlock(&iq->enq_lock);
                        }
                }

                if (reschedule || !tx_done)
                        taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task);
                else
                        lio_enable_irq(droq, iq);
        }
}

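/*
 * Mask interrupts for the duration of the ISR, let the device-specific
 * code read and clear the interrupt cause registers, kick the droq
 * taskqueues for any pending packet work, then unmask again unless the
 * device is in reset.
 */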
static void
lio_intr_handler(void *dev)
{
        struct octeon_device    *oct = (struct octeon_device *)dev;

        /* Disable our interrupts for the duration of ISR */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        oct->fn_list.process_interrupt_regs(oct);

        lio_schedule_droq_pkt_handlers(oct);

        /* Re-enable our interrupts  */
        if (!(atomic_load_acq_int(&oct->status) == LIO_DEV_IN_RESET))
                oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
}

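/*
 * Allocate and wire up the MSI-X vectors: one "rxtx" vector per IOQ, bound
 * round-robin to CPUs (or to RSS buckets when RSS is compiled in), plus
 * one final "aux" vector for the non-IOQ interrupt.
 */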
int
lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs)
{
        device_t                device;
        struct lio_ioq_vector   *ioq_vector;
        int     cpu_id, err, i;
        int     num_alloc_ioq_vectors;
        int     num_ioq_vectors;
        int     res_id;

        if (!oct->msix_on)
                return (1);

        ioq_vector = oct->ioq_vector;

#ifdef RSS
        if (oct->sriov_info.num_pf_rings != rss_getnumbuckets()) {
                lio_dev_info(oct, "Number of IOQ vectors (%d) does not match the number of RSS buckets (%d)\n",
                             oct->sriov_info.num_pf_rings, rss_getnumbuckets());
        }
#endif

        device = oct->device;

        oct->num_msix_irqs = num_ioqs;
        /* one non ioq interrupt for handling sli_mac_pf_int_sum */
        oct->num_msix_irqs += 1;
        num_alloc_ioq_vectors = oct->num_msix_irqs;

        if (pci_alloc_msix(device, &num_alloc_ioq_vectors) ||
            (num_alloc_ioq_vectors != oct->num_msix_irqs))
                goto err;

        num_ioq_vectors = oct->num_msix_irqs;

        /* For PF, there is one non-ioq interrupt handler */
        for (i = 0; i < num_ioq_vectors - 1; i++, ioq_vector++) {
                res_id = i + 1;

                ioq_vector->msix_res =
                    bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
                                           RF_SHAREABLE | RF_ACTIVE);
                if (ioq_vector->msix_res == NULL) {
                        lio_dev_err(oct,
                                    "Unable to allocate bus res msix[%d]\n", i);
                        goto err_1;
                }

                err = bus_setup_intr(device, ioq_vector->msix_res,
                                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
                                     lio_msix_intr_handler, ioq_vector,
                                     &ioq_vector->tag);
                if (err) {
                        bus_release_resource(device, SYS_RES_IRQ, res_id,
                                             ioq_vector->msix_res);
                        ioq_vector->msix_res = NULL;
                        lio_dev_err(oct, "Failed to register intr handler\n");
                        goto err_1;
                }

                bus_describe_intr(device, ioq_vector->msix_res, ioq_vector->tag,
                                  "rxtx%u", i);
                ioq_vector->vector = res_id;

#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#else
                cpu_id = i % mp_ncpus;
#endif
                CPU_SETOF(cpu_id, &ioq_vector->affinity_mask);

                /* Setting the IRQ affinity. */
                err = bus_bind_intr(device, ioq_vector->msix_res, cpu_id);
                if (err)
                        lio_dev_err(oct, "bus_bind_intr() failed\n");
#ifdef RSS
                lio_dev_dbg(oct, "Bound RSS bucket %d to CPU %d\n", i, cpu_id);
#else
                lio_dev_dbg(oct, "Bound Queue %d to CPU %d\n", i, cpu_id);
#endif
        }

        lio_dev_dbg(oct, "MSI-X enabled\n");

        res_id = num_ioq_vectors;
        oct->msix_res = bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
                                               RF_SHAREABLE | RF_ACTIVE);
        if (oct->msix_res == NULL) {
                lio_dev_err(oct, "Unable to allocate bus res msix for non-ioq interrupt\n");
                goto err_1;
        }

        err = bus_setup_intr(device, oct->msix_res, INTR_TYPE_NET | INTR_MPSAFE,
                             NULL, lio_intr_handler, oct, &oct->tag);
        if (err) {
                bus_release_resource(device, SYS_RES_IRQ, res_id,
                                     oct->msix_res);
                oct->msix_res = NULL;
                lio_dev_err(oct, "Failed to register intr handler\n");
                goto err_1;
        }

        bus_describe_intr(device, oct->msix_res, oct->tag, "aux");
        oct->aux_vector = res_id;

        return (0);
err_1:
        if (oct->tag != NULL) {
                bus_teardown_intr(device, oct->msix_res, oct->tag);
                oct->tag = NULL;
        }

        while (i) {
                i--;
                ioq_vector--;

                if (ioq_vector->tag != NULL) {
                        bus_teardown_intr(device, ioq_vector->msix_res,
                                          ioq_vector->tag);
                        ioq_vector->tag = NULL;
                }

                if (ioq_vector->msix_res != NULL) {
                        bus_release_resource(device, SYS_RES_IRQ,
                                             ioq_vector->vector,
                                             ioq_vector->msix_res);
                        ioq_vector->msix_res = NULL;
                }
        }

        if (oct->msix_res != NULL) {
                bus_release_resource(device, SYS_RES_IRQ, oct->aux_vector,
                                     oct->msix_res);
                oct->msix_res = NULL;
        }
err:
        pci_release_msi(device);
        lio_dev_err(oct, "MSI-X disabled\n");
        return (1);
}