4 * Copyright (c) 2017 Cavium, Inc. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include "lio_common.h"
39 #include "lio_response_manager.h"
40 #include "lio_device.h"
44 #include "lio_network.h"
47 lio_set_feature(struct ifnet *ifp, int cmd, uint16_t param1)
49 struct lio_ctrl_pkt nctrl;
50 struct lio *lio = if_getsoftc(ifp);
51 struct octeon_device *oct = lio->oct_dev;
54 bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
57 nctrl.ncmd.s.cmd = cmd;
58 nctrl.ncmd.s.param1 = param1;
59 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
60 nctrl.wait_time = 100;
62 nctrl.cb_fn = lio_ctrl_cmd_completion;
64 ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
66 lio_dev_err(oct, "Feature change failed in core (ret: 0x%x)\n",
74 lio_ctrl_cmd_completion(void *nctrl_ptr)
76 struct lio_ctrl_pkt *nctrl = (struct lio_ctrl_pkt *)nctrl_ptr;
78 struct octeon_device *oct;
83 if (lio->oct_dev == NULL)
88 switch (nctrl->ncmd.s.cmd) {
89 case LIO_CMD_CHANGE_DEVFLAGS:
90 case LIO_CMD_SET_MULTI_LIST:
93 case LIO_CMD_CHANGE_MACADDR:
94 mac = ((uint8_t *)&nctrl->udd[0]) + 2;
95 if (nctrl->ncmd.s.param1) {
96 /* vfidx is 0 based, but vf_num (param1) is 1 based */
97 int vfidx = nctrl->ncmd.s.param1 - 1;
98 bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
100 if (mac_is_admin_assigned)
101 lio_dev_info(oct, "MAC Address %pM is configured for VF %d\n",
104 lio_dev_info(oct, "MAC Address changed to %02x:%02x:%02x:%02x:%02x:%02x\n",
105 mac[0], mac[1], mac[2], mac[3], mac[4],
110 case LIO_CMD_GPIO_ACCESS:
111 lio_dev_info(oct, "LED Flashing visual identification\n");
114 case LIO_CMD_ID_ACTIVE:
115 lio_dev_info(oct, "LED Flashing visual identification\n");
118 case LIO_CMD_LRO_ENABLE:
119 lio_dev_info(oct, "HW LRO Enabled\n");
122 case LIO_CMD_LRO_DISABLE:
123 lio_dev_info(oct, "HW LRO Disabled\n");
126 case LIO_CMD_VERBOSE_ENABLE:
127 lio_dev_info(oct, "Firmware debug enabled\n");
130 case LIO_CMD_VERBOSE_DISABLE:
131 lio_dev_info(oct, "Firmware debug disabled\n");
134 case LIO_CMD_VLAN_FILTER_CTL:
135 if (nctrl->ncmd.s.param1)
136 lio_dev_info(oct, "VLAN filter enabled\n");
138 lio_dev_info(oct, "VLAN filter disabled\n");
141 case LIO_CMD_ADD_VLAN_FILTER:
142 lio_dev_info(oct, "VLAN filter %d added\n",
143 nctrl->ncmd.s.param1);
146 case LIO_CMD_DEL_VLAN_FILTER:
147 lio_dev_info(oct, "VLAN filter %d removed\n",
148 nctrl->ncmd.s.param1);
151 case LIO_CMD_SET_SETTINGS:
152 lio_dev_info(oct, "Settings changed\n");
156 * Case to handle "LIO_CMD_TNL_RX_CSUM_CTL"
157 * Command passed by NIC driver
159 case LIO_CMD_TNL_RX_CSUM_CTL:
160 if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_ENABLE) {
161 lio_dev_info(oct, "RX Checksum Offload Enabled\n");
162 } else if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_DISABLE) {
163 lio_dev_info(oct, "RX Checksum Offload Disabled\n");
168 * Case to handle "LIO_CMD_TNL_TX_CSUM_CTL"
169 * Command passed by NIC driver
171 case LIO_CMD_TNL_TX_CSUM_CTL:
172 if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_ENABLE) {
173 lio_dev_info(oct, "TX Checksum Offload Enabled\n");
174 } else if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_DISABLE) {
175 lio_dev_info(oct, "TX Checksum Offload Disabled\n");
180 * Case to handle "LIO_CMD_VXLAN_PORT_CONFIG"
181 * Command passed by NIC driver
183 case LIO_CMD_VXLAN_PORT_CONFIG:
184 if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_ADD) {
185 lio_dev_info(oct, "VxLAN Destination UDP PORT:%d ADDED\n",
186 nctrl->ncmd.s.param1);
187 } else if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_DEL) {
188 lio_dev_info(oct, "VxLAN Destination UDP PORT:%d DELETED\n",
189 nctrl->ncmd.s.param1);
193 case LIO_CMD_SET_FLOW_CTL:
194 lio_dev_info(oct, "Set RX/TX flow control parameters\n");
197 case LIO_CMD_SET_FNV:
198 if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_ENABLE)
199 lio_dev_info(oct, "FNV Enabled\n");
200 else if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_DISABLE)
201 lio_dev_info(oct, "FNV Disabled\n");
204 case LIO_CMD_PKT_STEERING_CTL:
205 if (nctrl->ncmd.s.param1 == LIO_CMD_PKT_STEERING_ENABLE) {
206 lio_dev_info(oct, "Packet Steering Enabled\n");
207 } else if (nctrl->ncmd.s.param1 ==
208 LIO_CMD_PKT_STEERING_DISABLE) {
209 lio_dev_info(oct, "Packet Steering Disabled\n");
214 case LIO_CMD_QUEUE_COUNT_CTL:
215 lio_dev_info(oct, "Queue count updated to %d\n",
216 nctrl->ncmd.s.param1);
220 lio_dev_err(oct, "%s Unknown cmd %d\n", __func__,
227 * \brief Setup output queue
228 * @param oct octeon device
229 * @param q_no which queue
230 * @param num_descs how many descriptors
231 * @param desc_size size of each descriptor
232 * @param app_ctx application context
235 lio_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
236 int desc_size, void *app_ctx)
240 lio_dev_dbg(oct, "Creating Droq: %d\n", q_no);
241 /* droq creation and local register settings. */
242 ret_val = lio_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
247 lio_dev_dbg(oct, "Using default droq %d\n", q_no);
252 * Send Credit for Octeon Output queues. Credits are always
253 * sent after the output queue is enabled.
255 lio_write_csr32(oct, oct->droq[q_no]->pkts_credit_reg,
256 oct->droq[q_no]->max_count);
262 lio_push_packet(void *m_buff, uint32_t len, union octeon_rh *rh, void *rxq,
265 struct mbuf *mbuf = m_buff;
266 struct ifnet *ifp = arg;
267 struct lio_droq *droq = rxq;
270 struct lio *lio = if_getsoftc(ifp);
272 /* Do not proceed if the interface is not in RUNNING state. */
273 if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
274 lio_recv_buffer_free(mbuf);
275 droq->stats.rx_dropped++;
279 if (rh->r_dh.has_hash) {
280 uint32_t hashtype, hashval;
282 if (rh->r_dh.has_hwtstamp) {
283 hashval = htobe32(*(uint32_t *)
284 (((uint8_t *)mbuf->m_data) +
285 ((rh->r_dh.len - 2) *
286 BYTES_PER_DHLEN_UNIT)));
288 htobe32(*(((uint32_t *)
289 (((uint8_t *)mbuf->m_data) +
290 ((rh->r_dh.len - 2) *
291 BYTES_PER_DHLEN_UNIT))) + 1));
293 hashval = htobe32(*(uint32_t *)
294 (((uint8_t *)mbuf->m_data) +
295 ((rh->r_dh.len - 1) *
296 BYTES_PER_DHLEN_UNIT)));
298 htobe32(*(((uint32_t *)
299 (((uint8_t *)mbuf->m_data) +
300 ((rh->r_dh.len - 1) *
301 BYTES_PER_DHLEN_UNIT))) + 1));
304 mbuf->m_pkthdr.flowid = hashval;
307 case LIO_RSS_HASH_IPV4:
308 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
310 case LIO_RSS_HASH_TCP_IPV4:
311 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
313 case LIO_RSS_HASH_IPV6:
314 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
316 case LIO_RSS_HASH_TCP_IPV6:
317 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
319 case LIO_RSS_HASH_IPV6_EX:
320 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6_EX);
322 case LIO_RSS_HASH_TCP_IPV6_EX:
324 M_HASHTYPE_RSS_TCP_IPV6_EX);
327 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
332 * This case won't hit as FW will always set has_hash
335 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
336 mbuf->m_pkthdr.flowid = droq->q_no;
339 m_adj(mbuf, rh->r_dh.len * 8);
340 len -= rh->r_dh.len * 8;
341 mbuf->m_flags |= M_PKTHDR;
343 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) &&
344 (rh->r_dh.priority || rh->r_dh.vlan)) {
345 uint16_t priority = rh->r_dh.priority;
346 uint16_t vid = rh->r_dh.vlan;
349 vtag = priority << 13 | vid;
350 mbuf->m_pkthdr.ether_vtag = vtag;
351 mbuf->m_flags |= M_VLANTAG;
354 if (rh->r_dh.csum_verified & LIO_IPSUM_VERIFIED)
355 mbuf->m_pkthdr.csum_flags |= (CSUM_L3_CALC |
358 if (rh->r_dh.csum_verified & LIO_L4SUM_VERIFIED) {
359 mbuf->m_pkthdr.csum_flags |= (CSUM_L4_CALC |
361 mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
363 mbuf->m_pkthdr.csum_data = htons(0xffff);
366 mbuf->m_pkthdr.rcvif = ifp;
367 mbuf->m_pkthdr.len = len;
369 if ((lio_hwlro == 0) &&
370 (if_getcapenable(ifp) & IFCAP_LRO) &&
371 (mbuf->m_pkthdr.csum_flags &
372 (CSUM_L3_VALID | CSUM_L4_VALID | CSUM_DATA_VALID |
373 CSUM_PSEUDO_HDR)) == (CSUM_L3_VALID | CSUM_L4_VALID |
376 if (droq->lro.lro_cnt) {
377 if (tcp_lro_rx(&droq->lro, mbuf, 0) == 0) {
378 droq->stats.rx_bytes_received += len;
379 droq->stats.rx_pkts_received++;
387 droq->stats.rx_bytes_received += len;
388 droq->stats.rx_pkts_received++;
391 lio_recv_buffer_free(mbuf);
392 droq->stats.rx_dropped++;
397 * \brief Setup input and output queues
398 * @param octeon_dev octeon device
399 * @param ifidx Interface Index
401 * Note: Queues are with respect to the octeon device. Thus
402 * an input queue is for egress packets, and output queues
403 * are for ingress packets.
406 lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
407 uint32_t num_iqs, uint32_t num_oqs)
409 struct lio_droq_ops droq_ops;
411 struct lio_droq *droq;
413 static int cpu_id, cpu_id_modulus;
414 int num_tx_descs, q, q_no, retval = 0;
416 ifp = octeon_dev->props.ifp;
418 lio = if_getsoftc(ifp);
420 bzero(&droq_ops, sizeof(struct lio_droq_ops));
422 droq_ops.fptr = lio_push_packet;
423 droq_ops.farg = (void *)ifp;
426 cpu_id_modulus = mp_ncpus;
428 for (q = 0; q < num_oqs; q++) {
429 q_no = lio->linfo.rxpciq[q].s.q_no;
430 lio_dev_dbg(octeon_dev, "lio_setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
432 retval = lio_setup_droq(octeon_dev, q_no,
433 LIO_GET_NUM_RX_DESCS_NIC_IF_CFG(
434 lio_get_conf(octeon_dev),
436 LIO_GET_NUM_RX_BUF_SIZE_NIC_IF_CFG(
437 lio_get_conf(octeon_dev),
440 lio_dev_err(octeon_dev, "%s : Runtime DROQ(RxQ) creation failed.\n",
445 droq = octeon_dev->droq[q_no];
447 /* designate a CPU for this droq */
448 droq->cpu_id = cpu_id;
450 if (cpu_id >= cpu_id_modulus)
453 lio_register_droq_ops(octeon_dev, q_no, &droq_ops);
457 for (q = 0; q < num_iqs; q++) {
458 num_tx_descs = LIO_GET_NUM_TX_DESCS_NIC_IF_CFG(
459 lio_get_conf(octeon_dev),
461 retval = lio_setup_iq(octeon_dev, ifidx, q,
462 lio->linfo.txpciq[q], num_tx_descs);
464 lio_dev_err(octeon_dev, " %s : Runtime IQ(TxQ) creation failed.\n",
474 * \brief Droq packet processor scheduler
475 * @param oct octeon device
478 lio_schedule_droq_pkt_handlers(struct octeon_device *oct)
480 struct lio_droq *droq;
483 if (oct->int_status & LIO_DEV_INTR_PKT_DATA) {
484 for (oq_no = 0; oq_no < LIO_MAX_OUTPUT_QUEUES(oct); oq_no++) {
485 if (!(oct->io_qmask.oq & BIT_ULL(oq_no)))
488 droq = oct->droq[oq_no];
490 taskqueue_enqueue(droq->droq_taskqueue,
497 lio_msix_intr_handler(void *vector)
499 struct lio_ioq_vector *ioq_vector = (struct lio_ioq_vector *)vector;
500 struct octeon_device *oct = ioq_vector->oct_dev;
501 struct lio_droq *droq = oct->droq[ioq_vector->droq_index];
504 ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
506 if ((ret & LIO_MSIX_PO_INT) || (ret & LIO_MSIX_PI_INT)) {
507 struct lio_instr_queue *iq = oct->instr_queue[droq->q_no];
508 int reschedule, tx_done = 1;
510 reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget);
512 if (atomic_load_acq_int(&iq->instr_pending))
513 tx_done = lio_flush_iq(oct, iq, oct->tx_budget);
515 if ((oct->props.ifp != NULL) && (iq->br != NULL)) {
516 if (mtx_trylock(&iq->enq_lock)) {
517 if (!drbr_empty(oct->props.ifp, iq->br))
518 lio_mq_start_locked(oct->props.ifp,
520 mtx_unlock(&iq->enq_lock);
524 if (reschedule || !tx_done)
525 taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task);
527 lio_enable_irq(droq, iq);
532 lio_intr_handler(void *dev)
534 struct octeon_device *oct = (struct octeon_device *)dev;
536 /* Disable our interrupts for the duration of ISR */
537 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
539 oct->fn_list.process_interrupt_regs(oct);
541 lio_schedule_droq_pkt_handlers(oct);
543 /* Re-enable our interrupts */
544 if (!(atomic_load_acq_int(&oct->status) == LIO_DEV_IN_RESET))
545 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
549 lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs)
552 struct lio_ioq_vector *ioq_vector;
554 int num_alloc_ioq_vectors;
561 ioq_vector = oct->ioq_vector;
564 if (oct->sriov_info.num_pf_rings != rss_getnumbuckets()) {
565 lio_dev_info(oct, "IOQ vectors (%d) are not equal number of RSS buckets (%d)\n",
566 oct->sriov_info.num_pf_rings, rss_getnumbuckets());
570 device = oct->device;
572 oct->num_msix_irqs = num_ioqs;
573 /* one non ioq interrupt for handling sli_mac_pf_int_sum */
574 oct->num_msix_irqs += 1;
575 num_alloc_ioq_vectors = oct->num_msix_irqs;
577 if (pci_alloc_msix(device, &num_alloc_ioq_vectors) ||
578 (num_alloc_ioq_vectors != oct->num_msix_irqs))
581 num_ioq_vectors = oct->num_msix_irqs;
583 /* For PF, there is one non-ioq interrupt handler */
584 for (i = 0; i < num_ioq_vectors - 1; i++, ioq_vector++) {
587 ioq_vector->msix_res =
588 bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
589 RF_SHAREABLE | RF_ACTIVE);
590 if (ioq_vector->msix_res == NULL) {
592 "Unable to allocate bus res msix[%d]\n", i);
596 err = bus_setup_intr(device, ioq_vector->msix_res,
597 INTR_TYPE_NET | INTR_MPSAFE, NULL,
598 lio_msix_intr_handler, ioq_vector,
601 bus_release_resource(device, SYS_RES_IRQ, res_id,
602 ioq_vector->msix_res);
603 ioq_vector->msix_res = NULL;
604 lio_dev_err(oct, "Failed to register intr handler");
608 bus_describe_intr(device, ioq_vector->msix_res, ioq_vector->tag,
610 ioq_vector->vector = res_id;
613 cpu_id = rss_getcpu(i % rss_getnumbuckets());
615 cpu_id = i % mp_ncpus;
617 CPU_SETOF(cpu_id, &ioq_vector->affinity_mask);
619 /* Setting the IRQ affinity. */
620 err = bus_bind_intr(device, ioq_vector->msix_res, cpu_id);
622 lio_dev_err(oct, "bus bind interrupt fail");
624 lio_dev_dbg(oct, "Bound RSS bucket %d to CPU %d\n", i, cpu_id);
626 lio_dev_dbg(oct, "Bound Queue %d to CPU %d\n", i, cpu_id);
630 lio_dev_dbg(oct, "MSI-X enabled\n");
632 res_id = num_ioq_vectors;
633 oct->msix_res = bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
634 RF_SHAREABLE | RF_ACTIVE);
635 if (oct->msix_res == NULL) {
636 lio_dev_err(oct, "Unable to allocate bus res msix for non-ioq interrupt\n");
640 err = bus_setup_intr(device, oct->msix_res, INTR_TYPE_NET | INTR_MPSAFE,
641 NULL, lio_intr_handler, oct, &oct->tag);
643 bus_release_resource(device, SYS_RES_IRQ, res_id,
645 oct->msix_res = NULL;
646 lio_dev_err(oct, "Failed to register intr handler");
650 bus_describe_intr(device, oct->msix_res, oct->tag, "aux");
651 oct->aux_vector = res_id;
655 if (oct->tag != NULL) {
656 bus_teardown_intr(device, oct->msix_res, oct->tag);
664 if (ioq_vector->tag != NULL) {
665 bus_teardown_intr(device, ioq_vector->msix_res,
667 ioq_vector->tag = NULL;
670 if (ioq_vector->msix_res != NULL) {
671 bus_release_resource(device, SYS_RES_IRQ,
673 ioq_vector->msix_res);
674 ioq_vector->msix_res = NULL;
678 if (oct->msix_res != NULL) {
679 bus_release_resource(device, SYS_RES_IRQ, oct->aux_vector,
681 oct->msix_res = NULL;
684 pci_release_msi(device);
685 lio_dev_err(oct, "MSI-X disabled\n");