4 * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Cavium, Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include "lio_common.h"
39 #include "lio_response_manager.h"
40 #include "lio_device.h"
43 #include "lio_network.h"
44 #include "cn23xx_pf_device.h"
45 #include "lio_image.h"
46 #include "lio_ioctl.h"
50 /* Number of milliseconds to wait for DDR initialization */
51 #define LIO_DDR_TIMEOUT 10000
52 #define LIO_MAX_FW_TYPE_LEN 8
/* Firmware-type name selected via the hw.lio.fw_type loader tunable. */
54 static char fw_type[LIO_MAX_FW_TYPE_LEN];
55 TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
58 * Integers that specify number of queues per PF.
59 * Valid range is 0 to 64.
60 * Use 0 to derive from CPU count.
/* Per-PF ring counts; 0 (the default) means derive from the CPU count. */
62 static int num_queues_per_pf0;
63 static int num_queues_per_pf1;
64 TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
65 TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
/* RSS is enabled by default; clear hw.lio.rss to disable it. */
68 static int lio_rss = 1;
69 TUNABLE_INT("hw.lio.rss", &lio_rss);
/* Hardware LRO disabled by default (software tcp_lro is set up per droq). */
73 unsigned int lio_hwlro = 0;
74 TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
77 * Bitmask indicating which consoles have debug
78 * output redirected to syslog.
/* Bit N set => OCTEON console N's debug output is logged via syslog. */
80 static unsigned long console_bitmask;
81 TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
84 * \brief determines if a given console has debug enabled.
85 * @param console console to check
86 * @returns 1 = enabled. 0 otherwise
/* Tests the console's bit in the hw.lio.console_bitmask tunable. */
89 lio_console_debug_enabled(uint32_t console)
92 return (console_bitmask >> (console)) & 0x1;
/*
 * Forward declarations for the driver-local helpers defined later in this
 * file (device lifecycle, NIC module setup/teardown, link handling, etc.).
 */
95 static int lio_detach(device_t dev);
97 static int lio_device_init(struct octeon_device *octeon_dev);
98 static int lio_chip_specific_setup(struct octeon_device *oct);
99 static void lio_watchdog(void *param);
100 static int lio_load_firmware(struct octeon_device *oct);
101 static int lio_nic_starter(struct octeon_device *oct);
102 static int lio_init_nic_module(struct octeon_device *oct);
103 static int lio_setup_nic_devices(struct octeon_device *octeon_dev);
104 static int lio_link_info(struct lio_recv_info *recv_info, void *ptr);
105 static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
107 static int lio_set_rxcsum_command(if_t ifp, int command,
109 static int lio_setup_glists(struct octeon_device *oct, struct lio *lio,
111 static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
112 static inline void lio_update_link_status(if_t ifp,
113 union octeon_link_status *ls);
114 static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
115 static int lio_stop_nic_module(struct octeon_device *oct);
116 static void lio_destroy_resources(struct octeon_device *oct);
117 static int lio_setup_rx_oom_poll_fn(if_t ifp);
119 static void lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid);
120 static void lio_vlan_rx_kill_vid(void *arg, if_t ifp,
122 static struct octeon_device *
123 lio_get_other_octeon_device(struct octeon_device *oct);
125 static int lio_wait_for_oq_pkts(struct octeon_device *oct);
/* Non-static: also referenced from other translation units of the driver. */
127 int lio_send_rss_param(struct lio *lio);
128 static int lio_dbg_console_print(struct octeon_device *oct,
129 uint32_t console_num, char *prefix,
132 /* Polling interval for determining when NIC application is alive */
133 #define LIO_STARTER_POLL_INTERVAL_MS 100
137 * This array contains the list of IDs on which the driver should load.
/* One probe-table entry: vendor/device/subdevice triple plus an index into
 * lio_strings[] for the human-readable adapter name. */
139 struct lio_vendor_info {
142 uint16_t subdevice_id;
/* Supported CN23XX PF adapters; terminated by a zero vendor_id entry. */
147 static struct lio_vendor_info lio_pci_tbl[] = {
149 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
153 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
157 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
161 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
165 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
/* Marketing names; indexed by lio_vendor_info.index. */
171 static char *lio_strings[] = {
172 "LiquidIO 2350 10GbE Server Adapter",
173 "LiquidIO 2360 10GbE Server Adapter",
174 "LiquidIO 2350 25GbE Server Adapter",
175 "LiquidIO 2360 25GbE Server Adapter",
/* Firmware response payload for the IF_CFG soft command. */
178 struct lio_if_cfg_resp {
180 struct octeon_if_cfg_info cfg_info;
/* Completion context used while waiting for the IF_CFG response. */
184 struct lio_if_cfg_context {
/* Completion context for the RX_CTL (rx enable/disable) soft command. */
189 struct lio_rx_ctl_context {
/*
 * \brief device_probe method: match the PCI vendor/device/subdevice/revision
 * quadruple against lio_pci_tbl and, on a hit, publish a descriptive name.
 * Returns BUS_PROBE_DEFAULT on match.
 */
195 lio_probe(device_t dev)
197 struct lio_vendor_info *tbl;
201 uint16_t subdevice_id;
203 char device_ver[256];
/* Bail out early for non-Cavium functions. */
205 vendor_id = pci_get_vendor(dev);
206 if (vendor_id != PCI_VENDOR_ID_CAVIUM)
209 device_id = pci_get_device(dev);
210 subdevice_id = pci_get_subdevice(dev);
211 revision_id = pci_get_revid(dev);
/* Walk the table until the zero-vendor_id sentinel. */
214 while (tbl->vendor_id) {
215 if ((vendor_id == tbl->vendor_id) &&
216 (device_id == tbl->device_id) &&
217 (subdevice_id == tbl->subdevice_id) &&
218 (revision_id == tbl->revision_id)) {
219 sprintf(device_ver, "%s, Version - %s",
220 lio_strings[tbl->index], LIO_VERSION)
221 device_set_desc_copy(dev, device_ver);
222 return (BUS_PROBE_DEFAULT);
/*
 * \brief device_attach method: allocate the octeon_device softc, run full
 * device initialization, start the per-NIC watchdog kproc, then poll until
 * the firmware's NIC application reports CORE_OK and start the NIC module.
 */
232 lio_attach(device_t device)
234 struct octeon_device *oct_dev = NULL;
237 int timeout, ret = 1;
238 uint8_t bus, dev, function;
240 oct_dev = lio_allocate_device(device);
241 if (oct_dev == NULL) {
242 device_printf(device, "Error: Unable to allocate device\n");
246 oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
247 oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
248 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
250 oct_dev->device = device;
251 bus = pci_get_bus(device);
252 dev = pci_get_slot(device);
253 function = pci_get_function(device);
255 lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
256 pci_get_vendor(device), pci_get_device(device), bus, dev,
259 if (lio_device_init(oct_dev)) {
260 lio_dev_err(oct_dev, "Failed to init device\n");
/* SLI_SCRATCH1 bit 2 marks whether this NIC already has a watchdog
 * kproc; only one watchdog thread is created per NIC. */
265 scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
266 if (!(scratch1 & 4ULL)) {
268 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
269 * the lio watchdog kernel thread is running for this
270 * NIC. Each NIC gets one watchdog kernel thread.
273 lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
275 error = kproc_create(lio_watchdog, oct_dev,
276 &oct_dev->watchdog_task, 0, 0,
277 "liowd/%02hhx:%02hhx.%hhx", bus,
280 kproc_resume(oct_dev->watchdog_task);
/* kproc_create failed: clear the task pointer so detach skips it. */
282 oct_dev->watchdog_task = NULL;
284 "failed to create kernel_thread\n");
289 oct_dev->rx_pause = 1;
290 oct_dev->tx_pause = 1;
/* Poll (interrupts are not yet available during boot) until the
 * firmware NIC application signals LIO_DEV_CORE_OK or we time out. */
293 while (timeout < LIO_NIC_STARTER_TIMEOUT) {
294 lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
295 timeout += LIO_STARTER_POLL_INTERVAL_MS;
298 * During the boot process interrupts are not available.
299 * So polling for first control message from FW.
302 lio_droq_bh(oct_dev->droq[0], 0);
304 if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
305 ret = lio_nic_starter(oct_dev);
311 lio_dev_err(oct_dev, "Firmware failed to start\n");
316 lio_dev_dbg(oct_dev, "Device is ready\n");
/*
 * \brief device_detach method: suspend the watchdog kproc, clear the
 * watchdog-running flag in SLI_SCRATCH1, stop the NIC module if it was
 * started, then tear down all device resources and free the softc.
 */
322 lio_detach(device_t dev)
324 struct octeon_device *oct_dev = device_get_softc(dev);
326 lio_dev_dbg(oct_dev, "Stopping device\n");
327 if (oct_dev->watchdog_task) {
330 kproc_suspend(oct_dev->watchdog_task, 0);
/* Clear the "watchdog running" bit so a future attach restarts it. */
332 scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
334 lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
337 if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
338 lio_stop_nic_module(oct_dev);
341 * Reset the octeon device and cleanup all memory allocated for
342 * the octeon device by driver.
344 lio_destroy_resources(oct_dev);
346 lio_dev_info(oct_dev, "Device removed\n");
349 * This octeon device has been removed. Update the global
350 * data structure to reflect this. Free the device structure.
352 lio_free_device_mem(oct_dev);
/*
 * \brief device_shutdown method: tell the firmware to stop RX on the
 * interface (start_stop = 0) so traffic ceases before the system halts.
 */
357 lio_shutdown(device_t dev)
359 struct octeon_device *oct_dev = device_get_softc(dev);
360 struct lio *lio = if_getsoftc(oct_dev->props.ifp);
362 lio_send_rx_ctrl_cmd(lio, 0);
/* device_suspend method (body not visible in this fragment). */
368 lio_suspend(device_t dev)
/* device_resume method (body not visible in this fragment). */
375 lio_resume(device_t dev)
/* Module event handler: initializes the global device list on load. */
382 lio_event(struct module *mod, int event, void *junk)
387 lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
396 /*********************************************************************
397 * FreeBSD Device Interface Entry Points
398 * *******************************************************************/
399 static device_method_t lio_methods[] = {
400 /* Device interface */
401 DEVMETHOD(device_probe, lio_probe),
402 DEVMETHOD(device_attach, lio_attach),
403 DEVMETHOD(device_detach, lio_detach),
404 DEVMETHOD(device_shutdown, lio_shutdown),
405 DEVMETHOD(device_suspend, lio_suspend),
406 DEVMETHOD(device_resume, lio_resume),
/* Driver description: softc is the full octeon_device structure. */
410 static driver_t lio_driver = {
411 LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
/* Register on the PCI bus; lio_event handles module load/unload. */
414 DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL);
416 MODULE_DEPEND(lio, pci, 1, 1, 1);
417 MODULE_DEPEND(lio, ether, 1, 1, 1);
418 MODULE_DEPEND(lio, firmware, 1, 1, 1);
/*
 * \brief Returns nonzero when the hw.lio.fw_type tunable selects "none",
 * i.e. the host should not download firmware to the card.
 */
421 fw_type_is_none(void)
423 return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
424 sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
428 * \brief Device initialization for each Octeon device that is probed
429 * @param octeon_dev octeon device
/*
 * Walks the device through its bring-up state machine, storing progress in
 * octeon_dev->status (atomic) so teardown can unwind exactly as far as
 * initialization got: PCI enable -> BAR mapping -> dispatch list -> soft
 * command pool -> IOQ vectors -> instruction/output queues -> interrupts ->
 * console setup -> firmware download. Returns 0 on success (LIO_DEV_HOST_OK).
 */
432 lio_device_init(struct octeon_device *octeon_dev)
434 unsigned long ddr_timeout = LIO_DDR_TIMEOUT;
435 char *dbg_enb = NULL;
438 uint8_t bus, dev, function;
439 char bootcmd[] = "\n";
441 bus = pci_get_bus(octeon_dev->device);
442 dev = pci_get_slot(octeon_dev->device);
443 function = pci_get_function(octeon_dev->device);
445 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);
447 /* Enable access to the octeon device */
448 if (pci_enable_busmaster(octeon_dev->device)) {
449 lio_dev_err(octeon_dev, "pci_enable_device failed\n");
453 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);
455 /* Identify the Octeon type and map the BAR address space. */
456 if (lio_chip_specific_setup(octeon_dev)) {
457 lio_dev_err(octeon_dev, "Chip specific setup failed\n");
461 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);
464 * Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
465 * since that is what is required for the reference to be removed
466 * during de-initialization (see 'octeon_destroy_resources').
468 lio_register_device(octeon_dev, bus, dev, function, true);
471 octeon_dev->app_mode = LIO_DRV_INVALID_APP;
/* Soft-reset only when we will be loading firmware ourselves and no
 * firmware is already resident on the card. */
473 if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
475 /* Do a soft reset of the Octeon device. */
476 if (octeon_dev->fn_list.soft_reset(octeon_dev))
479 /* things might have changed */
480 if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
489 * Initialize the dispatch mechanism used to push packets arriving on
490 * Octeon Output queues.
492 if (lio_init_dispatch_list(octeon_dev))
/* CORE_DRV_ACTIVE from the firmware is routed to lio_core_drv_init,
 * which is what eventually moves status to LIO_DEV_CORE_OK. */
495 lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
496 LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
497 lio_core_drv_init, octeon_dev);
498 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);
500 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
502 lio_dev_err(octeon_dev,
503 "Failed to configure device registers\n");
507 /* Initialize soft command buffer pool */
508 if (lio_setup_sc_buffer_pool(octeon_dev)) {
509 lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
513 atomic_store_rel_int(&octeon_dev->status,
514 LIO_DEV_SC_BUFF_POOL_INIT_DONE);
516 if (lio_allocate_ioq_vector(octeon_dev)) {
517 lio_dev_err(octeon_dev,
518 "IOQ vector allocation failed\n");
522 atomic_store_rel_int(&octeon_dev->status,
523 LIO_DEV_MSIX_ALLOC_VECTOR_DONE);
/* Pre-allocate zeroed instr_queue descriptors for every possible IQ. */
525 for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
526 octeon_dev->instr_queue[i] =
527 malloc(sizeof(struct lio_instr_queue),
528 M_DEVBUF, M_NOWAIT | M_ZERO);
529 if (octeon_dev->instr_queue[i] == NULL)
533 /* Setup the data structures that manage this Octeon's Input queues. */
534 if (lio_setup_instr_queue0(octeon_dev)) {
535 lio_dev_err(octeon_dev,
536 "Instruction queue initialization failed\n");
540 atomic_store_rel_int(&octeon_dev->status,
541 LIO_DEV_INSTR_QUEUE_INIT_DONE);
544 * Initialize lists to manage the requests of different types that
545 * arrive from user & kernel applications for this octeon device.
548 if (lio_setup_response_list(octeon_dev)) {
549 lio_dev_err(octeon_dev, "Response list allocation failed\n");
553 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);
/* Likewise pre-allocate droq descriptors for every possible OQ. */
555 for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
556 octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
557 M_DEVBUF, M_NOWAIT | M_ZERO);
558 if (octeon_dev->droq[i] == NULL)
562 if (lio_setup_output_queue0(octeon_dev)) {
563 lio_dev_err(octeon_dev, "Output queue initialization failed\n");
567 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);
570 * Setup the interrupt handler and record the INT SUM register address
572 if (lio_setup_interrupt(octeon_dev,
573 octeon_dev->sriov_info.num_pf_rings))
576 /* Enable Octeon device interrupts */
577 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
579 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);
582 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
583 * the output queue is enabled.
584 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
585 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
586 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
587 * before any credits have been issued, causing the ring to be reset
588 * (and the f/w appear to never have started).
590 for (j = 0; j < octeon_dev->num_oqs; j++)
591 lio_write_csr32(octeon_dev,
592 octeon_dev->droq[j]->pkts_credit_reg,
593 octeon_dev->droq[j]->max_count);
595 /* Enable the input and output queues for this Octeon device */
596 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
598 lio_dev_err(octeon_dev, "Failed to enable input/output queues");
602 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);
605 lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
607 lio_dev_info(octeon_dev,
608 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
611 lio_sleep_timeout(LIO_RESET_MSECS);
614 * Wait for the octeon to initialize DDR after the
/* With ddr_timeout == 0, wait indefinitely; pause() returning
 * nonzero means the sleep was interrupted (e.g. by the user). */
617 while (!ddr_timeout) {
618 if (pause("-", lio_ms_to_ticks(100))) {
619 /* user probably pressed Control-C */
624 ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
626 lio_dev_err(octeon_dev,
627 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
632 if (lio_wait_for_bootloader(octeon_dev, 1100)) {
633 lio_dev_err(octeon_dev, "Board not responding\n");
637 /* Divert uboot to take commands from host instead. */
638 ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);
640 lio_dev_dbg(octeon_dev, "Initializing consoles\n");
641 ret = lio_init_consoles(octeon_dev);
643 lio_dev_err(octeon_dev, "Could not access board consoles\n");
648 * If console debug enabled, specify empty string to
649 * use default enablement ELSE specify NULL string for
652 dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
653 ret = lio_add_console(octeon_dev, 0, dbg_enb);
656 lio_dev_err(octeon_dev, "Could not access board console\n");
658 } else if (lio_console_debug_enabled(0)) {
660 * If console was added AND we're logging console output
661 * then set our console print function.
663 octeon_dev->console[0].print = lio_dbg_console_print;
666 atomic_store_rel_int(&octeon_dev->status,
667 LIO_DEV_CONSOLE_INIT_DONE);
669 lio_dev_dbg(octeon_dev, "Loading firmware\n");
671 ret = lio_load_firmware(octeon_dev);
673 lio_dev_err(octeon_dev, "Could not load firmware to board\n");
678 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);
684 * \brief PCI FLR for each Octeon device.
685 * @param oct octeon device
/*
 * Performs a PCIe Function Level Reset: saves config state, quiesces the
 * function, waits (up to ~100ms, then 5s more) for pending transactions to
 * drain, initiates the FLR, and restores config state afterwards.
 */
688 lio_pci_flr(struct octeon_device *oct)
690 uint32_t exppos, status;
692 pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);
694 pci_save_state(oct->device);
696 /* Quiesce the device completely */
697 pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);
699 /* Wait for Transaction Pending bit clean */
702 status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
703 if (status & PCIEM_STA_TRANSACTION_PND) {
704 lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
707 status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
/* Proceed with the reset even if transactions never drained. */
708 if (status & PCIEM_STA_TRANSACTION_PND)
709 lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
712 pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
715 pci_restore_state(oct->device);
719 * \brief Debug console print function
720 * @param octeon_dev octeon device
721 * @param console_num console number
722 * @param prefix first portion of line to display
723 * @param suffix second portion of line to display
725 * The OCTEON debug console outputs entire lines (excluding '\n').
726 * Normally, the line will be passed in the 'prefix' parameter.
727 * However, due to buffering, it is possible for a line to be split into two
728 * parts, in which case they will be passed as the 'prefix' parameter and
729 * 'suffix' parameter.
732 lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
733 char *prefix, char *suffix)
/* Emit whichever parts are present; both NULL logs nothing useful. */
736 if (prefix != NULL && suffix != NULL)
737 lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
738 else if (prefix != NULL)
739 lio_dev_info(oct, "%u: %s\n", console_num, prefix);
740 else if (suffix != NULL)
741 lio_dev_info(oct, "%u: %s\n", console_num, suffix);
/*
 * \brief Watchdog kproc body: every 2 seconds reads SLI_SCRATCH2, where the
 * firmware posts a bitmask of crashed/stuck cores, and logs each newly
 * crashed core once. Also flags the partner PF (other function on the same
 * slot) so both interfaces know cores have crashed.
 * @param param the octeon_device this watchdog monitors
 */
747 lio_watchdog(void *param)
750 uint16_t mask_of_crashed_or_stuck_cores = 0;
751 struct octeon_device *oct = param;
/* Tracks which cores we already reported, to avoid log spam. */
752 bool err_msg_was_printed[12];
754 bzero(err_msg_was_printed, sizeof(err_msg_was_printed));
/* Honor kproc_suspend() requests from lio_detach(). */
757 kproc_suspend_check(oct->watchdog_task);
758 mask_of_crashed_or_stuck_cores =
759 (uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
761 if (mask_of_crashed_or_stuck_cores) {
762 struct octeon_device *other_oct;
764 oct->cores_crashed = true;
765 other_oct = lio_get_other_octeon_device(oct);
766 if (other_oct != NULL)
767 other_oct->cores_crashed = true;
769 for (core_num = 0; core_num < LIO_MAX_CORES;
771 bool core_crashed_or_got_stuck;
773 core_crashed_or_got_stuck =
774 (mask_of_crashed_or_stuck_cores >>
776 if (core_crashed_or_got_stuck &&
777 !err_msg_was_printed[core_num]) {
779 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
781 err_msg_was_printed[core_num] = true;
787 /* sleep for two seconds */
788 pause("-", lio_ms_to_ticks(2000));
/*
 * \brief Identify the chip from PCI config space and run the CN23XX PF
 * setup. Also resolves the per-function ring-count tunables
 * (num_queues_per_pf0/1), falling back to 0 (auto) on negative values.
 * Returns nonzero for unknown devices or setup failure.
 */
793 lio_chip_specific_setup(struct octeon_device *oct)
799 dev_id = lio_read_pci_cfg(oct, 0);
800 oct->subdevice_id = pci_get_subdevice(oct->device);
803 case LIO_CN23XX_PF_PCIID:
804 oct->chip_id = LIO_CN23XX_PF_VID;
/* PF function 0 and function 1 have independent ring tunables. */
805 if (pci_get_function(oct->device) == 0) {
806 if (num_queues_per_pf0 < 0) {
807 lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
809 num_queues_per_pf0 = 0;
812 oct->sriov_info.num_pf_rings = num_queues_per_pf0;
814 if (num_queues_per_pf1 < 0) {
815 lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
817 num_queues_per_pf1 = 0;
820 oct->sriov_info.num_pf_rings = num_queues_per_pf1;
823 ret = lio_cn23xx_pf_setup_device(oct);
829 lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
833 lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
834 OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
835 lio_get_conf(oct)->card_name, LIO_VERSION);
/*
 * \brief Find the partner Octeon PF: the device with the next octeon_id
 * sharing this device's PCI bus and slot (i.e. the other function of the
 * same physical adapter). Returns NULL-equivalent when no such partner
 * exists (fall-through path not visible in this fragment).
 */
840 static struct octeon_device *
841 lio_get_other_octeon_device(struct octeon_device *oct)
843 struct octeon_device *other_oct;
845 other_oct = lio_get_device(oct->octeon_id + 1);
847 if ((other_oct != NULL) && other_oct->device) {
848 int oct_busnum, other_oct_busnum;
850 oct_busnum = pci_get_bus(oct->device);
851 other_oct_busnum = pci_get_bus(other_oct->device);
853 if (oct_busnum == other_oct_busnum) {
854 int oct_slot, other_oct_slot;
856 oct_slot = pci_get_slot(oct->device);
857 other_oct_slot = pci_get_slot(other_oct->device);
/* Same bus and slot => same physical card. */
859 if (oct_slot == other_oct_slot)
867 * \brief Load firmware to device
868 * @param oct octeon device
870 * Maps device to firmware filename, requests firmware, and downloads it
/*
 * Builds the firmware image name from the card name and the fw_type tunable
 * (defaulting to the NIC image), fetches it via firmware(9), downloads it to
 * the card, and releases the image.
 * NOTE(review): fw_name is built with sprintf(); bounds depend on
 * LIO_MAX_FW_FILENAME_LEN being large enough for all card/type names.
 */
873 lio_load_firmware(struct octeon_device *oct)
875 const struct firmware *fw;
876 char *tmp_fw_type = NULL;
878 char fw_name[LIO_MAX_FW_FILENAME_LEN];
/* Empty tunable selects the default NIC firmware type. */
880 if (fw_type[0] == '\0')
881 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
883 tmp_fw_type = fw_type;
885 sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
886 lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);
888 fw = firmware_get(fw_name);
890 lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
895 ret = lio_download_firmware(oct, fw->data, fw->datasize);
897 firmware_put(fw, FIRMWARE_UNLOAD);
/*
 * \brief Called once the firmware reports CORE_OK: marks the device RUNNING
 * and, if the firmware application is the NIC app, initializes the NIC
 * module. Any other application mode is reported as an error.
 */
903 lio_nic_starter(struct octeon_device *oct)
907 atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);
909 if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
910 if (lio_init_nic_module(oct)) {
911 lio_dev_err(oct, "NIC initialization failed\n");
/* NOTE(review): "ONiLY" looks like a typo for "ONLY"; as written this
 * macro is presumably never defined, so the SR-IOV block is compiled
 * out — confirm against the rest of the driver before changing. */
913 #ifdef CAVIUM_ONiLY_23XX_VF
915 if (octeon_enable_sriov(oct) < 0)
921 "Unexpected application running on NIC (%d). Check firmware.\n",
/*
 * \brief Initialize the NIC module: record the port count from the device
 * configuration, reset the interface-properties block, and create the
 * network interfaces via lio_setup_nic_devices().
 */
930 lio_init_nic_module(struct octeon_device *oct)
932 int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
935 lio_dev_dbg(oct, "Initializing network interfaces\n");
938 * only default iq and oq were initialized
939 * initialize the rest as well
942 /* run port_config command for each port */
943 oct->ifcount = num_nic_ports;
945 bzero(&oct->props, sizeof(struct lio_if_props));
/* -1 marks the gmx port as not yet assigned by firmware. */
947 oct->props.gmxport = -1;
949 retval = lio_setup_nic_devices(oct);
951 lio_dev_err(oct, "Setup NIC devices failed\n");
952 goto lio_init_failure;
955 lio_dev_dbg(oct, "Network interfaces ready\n");
/*
 * \brief ifmedia change callback. Only Ethernet media is accepted, and the
 * media subtype cannot be changed at runtime; unsupported requests are
 * rejected with an error.
 */
967 lio_ifmedia_update(if_t ifp)
969 struct lio *lio = if_getsoftc(ifp);
974 /* We only support Ethernet media type. */
975 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
978 switch (IFM_SUBTYPE(ifm->ifm_media)) {
986 /* We don't support changing the media type. */
987 lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
988 IFM_SUBTYPE(ifm->ifm_media));
/*
 * \brief Map the PCI subdevice ID to an ifmedia subtype: 10G SR for the
 * 2350/2360 10G cards, 25G SR for the 25G cards; defaults to 10G SR.
 */
996 lio_get_media_subtype(struct octeon_device *oct)
999 switch(oct->subdevice_id) {
1000 case LIO_CN2350_10G_SUBDEVICE:
1001 case LIO_CN2350_10G_SUBDEVICE1:
1002 case LIO_CN2360_10G_SUBDEVICE:
1003 return (IFM_10G_SR);
1005 case LIO_CN2350_25G_SUBDEVICE:
1006 case LIO_CN2360_25G_SUBDEVICE:
1007 return (IFM_25G_SR);
/* Unknown subdevice: assume 10G. */
1010 return (IFM_10G_SR);
/*
 * \brief Map the PCI subdevice ID to a link baudrate: 10 Gbps for the 10G
 * cards, 25 Gbps for the 25G cards; defaults to 10 Gbps.
 */
1014 lio_get_baudrate(struct octeon_device *oct)
1017 switch(oct->subdevice_id) {
1018 case LIO_CN2350_10G_SUBDEVICE:
1019 case LIO_CN2350_10G_SUBDEVICE1:
1020 case LIO_CN2360_10G_SUBDEVICE:
1021 return (IF_Gbps(10));
1023 case LIO_CN2350_25G_SUBDEVICE:
1024 case LIO_CN2360_25G_SUBDEVICE:
1025 return (IF_Gbps(25));
/* Unknown subdevice: assume 10 Gbps. */
1028 return (IF_Gbps(10));
/*
 * \brief ifmedia status callback: report link validity, active/none state,
 * media subtype, and duplex based on the firmware-supplied link info.
 */
1032 lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
1034 struct lio *lio = if_getsoftc(ifp);
1036 /* Report link down if the driver isn't running. */
1037 if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1038 ifmr->ifm_active |= IFM_NONE;
1042 /* Setup the default interface info. */
1043 ifmr->ifm_status = IFM_AVALID;
1044 ifmr->ifm_active = IFM_ETHER;
1046 if (lio->linfo.link.s.link_up) {
1047 ifmr->ifm_status |= IFM_ACTIVE;
1049 ifmr->ifm_active |= IFM_NONE;
1053 ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);
/* Firmware link info carries the duplex bit. */
1055 if (lio->linfo.link.s.duplex)
1056 ifmr->ifm_active |= IFM_FDX;
1058 ifmr->ifm_active |= IFM_HDX;
/*
 * \brief if_get_counter method: aggregate per-queue statistics (droq stats
 * for RX counters, instr_queue stats for TX counters) and per-link firmware
 * stats (multicast, collisions, input errors). Unknown counters fall back
 * to the stack's default accounting.
 */
1062 lio_get_counter(if_t ifp, ift_counter cnt)
1064 struct lio *lio = if_getsoftc(ifp);
1065 struct octeon_device *oct = lio->oct_dev;
1066 uint64_t counter = 0;
1070 case IFCOUNTER_IPACKETS:
1071 for (i = 0; i < oct->num_oqs; i++) {
1072 q_no = lio->linfo.rxpciq[i].s.q_no;
1073 counter += oct->droq[q_no]->stats.rx_pkts_received;
1076 case IFCOUNTER_OPACKETS:
1077 for (i = 0; i < oct->num_iqs; i++) {
1078 q_no = lio->linfo.txpciq[i].s.q_no;
1079 counter += oct->instr_queue[q_no]->stats.tx_done;
1082 case IFCOUNTER_IBYTES:
1083 for (i = 0; i < oct->num_oqs; i++) {
1084 q_no = lio->linfo.rxpciq[i].s.q_no;
1085 counter += oct->droq[q_no]->stats.rx_bytes_received;
1088 case IFCOUNTER_OBYTES:
1089 for (i = 0; i < oct->num_iqs; i++) {
1090 q_no = lio->linfo.txpciq[i].s.q_no;
1091 counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
1094 case IFCOUNTER_IQDROPS:
1095 for (i = 0; i < oct->num_oqs; i++) {
1096 q_no = lio->linfo.rxpciq[i].s.q_no;
1097 counter += oct->droq[q_no]->stats.rx_dropped;
1100 case IFCOUNTER_OQDROPS:
1101 for (i = 0; i < oct->num_iqs; i++) {
1102 q_no = lio->linfo.txpciq[i].s.q_no;
1103 counter += oct->instr_queue[q_no]->stats.tx_dropped;
/* Link-wide counters below come from firmware link_stats, not queues. */
1106 case IFCOUNTER_IMCASTS:
1107 counter = oct->link_stats.fromwire.total_mcst;
1109 case IFCOUNTER_OMCASTS:
1110 counter = oct->link_stats.fromhost.mcast_pkts_sent;
1112 case IFCOUNTER_COLLISIONS:
1113 counter = oct->link_stats.fromhost.total_collisions;
1115 case IFCOUNTER_IERRORS:
1116 counter = oct->link_stats.fromwire.fcs_err +
1117 oct->link_stats.fromwire.l2_err +
1118 oct->link_stats.fromwire.frame_err;
1121 return (if_get_counter_default(ifp, cnt));
/*
 * \brief Initialize the ifnet for one LiquidIO interface: set up ifmedia
 * (fixed subtype + autoselect), install the driver's ifnet method pointers,
 * seed the MTU from firmware link info, and advertise/enable the checksum,
 * TSO, LRO, VLAN, and jumbo capabilities.
 */
1128 lio_init_ifnet(struct lio *lio)
1130 struct octeon_device *oct = lio->oct_dev;
1131 if_t ifp = lio->ifp;
1133 /* ifconfig entrypoint for media type/status reporting */
1134 ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
1135 lio_ifmedia_status);
1137 /* set the default interface values */
1138 ifmedia_add(&lio->ifmedia,
1139 (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
1141 ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
1142 ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));
1144 lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
1145 lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);
1147 if_initname(ifp, device_get_name(oct->device),
1148 device_get_unit(oct->device));
1149 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
/* Install driver entry points for ioctl, stats, TX, flush, and init. */
1150 if_setioctlfn(ifp, lio_ioctl);
1151 if_setgetcounterfn(ifp, lio_get_counter);
1152 if_settransmitfn(ifp, lio_mq_start);
1153 if_setqflushfn(ifp, lio_qflush);
1154 if_setinitfn(ifp, lio_open);
/* MTU comes from the firmware-reported link info. */
1155 if_setmtu(ifp, lio->linfo.link.s.mtu);
1156 lio->mtu = lio->linfo.link.s.mtu;
1157 if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1158 CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
1160 if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1161 IFCAP_TSO | IFCAP_LRO |
1162 IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
1163 IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
1164 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
1165 IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
/* Enable everything advertised by default. */
1167 if_setcapenable(ifp, if_getcapabilities(ifp));
1168 if_setbaudrate(ifp, lio_get_baudrate(oct));
/*
 * \brief Free the software LRO state of every droq belonging to this
 * interface; lro.ifp doubles as the "LRO initialized" flag and is cleared
 * after tcp_lro_free().
 */
1174 lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp)
1176 struct lio *lio = if_getsoftc(ifp);
1177 struct lio_droq *droq;
1181 for (i = 0; i < octeon_dev->num_oqs; i++) {
1182 q_no = lio->linfo.rxpciq[i].s.q_no;
1183 droq = octeon_dev->droq[q_no];
1184 if (droq->lro.ifp) {
1185 tcp_lro_free(&droq->lro);
1186 droq->lro.ifp = NULL;
/*
 * \brief Initialize software LRO on every droq of this interface. On any
 * per-queue failure, unwinds the queues initialized so far via
 * lio_tcp_lro_free() and returns the error.
 */
1192 lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp)
1194 struct lio *lio = if_getsoftc(ifp);
1195 struct lio_droq *droq;
1196 struct lro_ctrl *lro;
1197 int i, q_no, ret = 0;
1199 for (i = 0; i < octeon_dev->num_oqs; i++) {
1200 q_no = lio->linfo.rxpciq[i].s.q_no;
1201 droq = octeon_dev->droq[q_no];
1203 ret = tcp_lro_init(lro);
1205 lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
1207 goto lro_init_failed;
/* Unwind any queues already initialized before the failure. */
1216 lio_tcp_lro_free(octeon_dev, ifp);
1222 lio_setup_nic_devices(struct octeon_device *octeon_dev)
1224 union octeon_if_cfg if_cfg;
1225 struct lio *lio = NULL;
1227 struct lio_version *vdata;
1228 struct lio_soft_command *sc;
1229 struct lio_if_cfg_context *ctx;
1230 struct lio_if_cfg_resp *resp;
1231 struct lio_if_props *props;
1232 int num_iqueues, num_oqueues, retval;
1233 unsigned int base_queue;
1234 unsigned int gmx_port_id;
1235 uint32_t ctx_size, data_size;
1236 uint32_t ifidx_or_pfnum, resp_size;
1237 uint8_t mac[ETHER_HDR_LEN], i, j;
1239 /* This is to handle link status changes */
1240 lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
1241 LIO_OPCODE_NIC_INFO,
1242 lio_link_info, octeon_dev);
1244 for (i = 0; i < octeon_dev->ifcount; i++) {
1245 resp_size = sizeof(struct lio_if_cfg_resp);
1246 ctx_size = sizeof(struct lio_if_cfg_context);
1247 data_size = sizeof(struct lio_version);
1248 sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
1253 resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1254 ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1255 vdata = (struct lio_version *)sc->virtdptr;
1257 *((uint64_t *)vdata) = 0;
1258 vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
1259 vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
1260 vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);
1262 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
1263 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
1264 base_queue = octeon_dev->sriov_info.pf_srn;
1266 gmx_port_id = octeon_dev->pf_num;
1267 ifidx_or_pfnum = octeon_dev->pf_num;
1269 lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
1270 ifidx_or_pfnum, num_iqueues, num_oqueues);
1272 ctx->octeon_id = lio_get_device_id(octeon_dev);
1274 if_cfg.if_cfg64 = 0;
1275 if_cfg.s.num_iqueues = num_iqueues;
1276 if_cfg.s.num_oqueues = num_oqueues;
1277 if_cfg.s.base_queue = base_queue;
1278 if_cfg.s.gmx_port_id = gmx_port_id;
1282 lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
1283 LIO_OPCODE_NIC_IF_CFG, 0,
1284 if_cfg.if_cfg64, 0);
1286 sc->callback = lio_if_cfg_callback;
1287 sc->callback_arg = sc;
1288 sc->wait_time = 3000;
1290 retval = lio_send_soft_command(octeon_dev, sc);
1291 if (retval == LIO_IQ_SEND_FAILED) {
1292 lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
1294 /* Soft instr is freed by driver in case of failure. */
1295 goto setup_nic_dev_fail;
1299 * Sleep on a wait queue till the cond flag indicates that the
1300 * response arrived or timed-out.
1302 lio_sleep_cond(octeon_dev, &ctx->cond);
1304 retval = resp->status;
1306 lio_dev_err(octeon_dev, "iq/oq config failed\n");
1307 goto setup_nic_dev_fail;
1310 lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1311 (sizeof(struct octeon_if_cfg_info)) >> 3);
1313 num_iqueues = bitcount64(resp->cfg_info.iqmask);
1314 num_oqueues = bitcount64(resp->cfg_info.oqmask);
1316 if (!(num_iqueues) || !(num_oqueues)) {
1317 lio_dev_err(octeon_dev,
1318 "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
1319 LIO_CAST64(resp->cfg_info.iqmask),
1320 LIO_CAST64(resp->cfg_info.oqmask));
1321 goto setup_nic_dev_fail;
1324 lio_dev_dbg(octeon_dev,
1325 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
1326 i, LIO_CAST64(resp->cfg_info.iqmask),
1327 LIO_CAST64(resp->cfg_info.oqmask),
1328 num_iqueues, num_oqueues);
1330 ifp = if_alloc(IFT_ETHER);
1333 lio_dev_err(octeon_dev, "Device allocation failed\n");
1334 goto setup_nic_dev_fail;
1337 lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
1340 lio_dev_err(octeon_dev, "Lio allocation failed\n");
1341 goto setup_nic_dev_fail;
1344 if_setsoftc(ifp, lio);
1346 if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE);
1347 if_sethwtsomaxsegcount(ifp, LIO_MAX_SG);
1348 if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
1350 lio->ifidx = ifidx_or_pfnum;
1352 props = &octeon_dev->props;
1353 props->gmxport = resp->cfg_info.linfo.gmxport;
1356 lio->linfo.num_rxpciq = num_oqueues;
1357 lio->linfo.num_txpciq = num_iqueues;
1358 for (j = 0; j < num_oqueues; j++) {
1359 lio->linfo.rxpciq[j].rxpciq64 =
1360 resp->cfg_info.linfo.rxpciq[j].rxpciq64;
1363 for (j = 0; j < num_iqueues; j++) {
1364 lio->linfo.txpciq[j].txpciq64 =
1365 resp->cfg_info.linfo.txpciq[j].txpciq64;
1368 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1369 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1370 lio->linfo.link.link_status64 =
1371 resp->cfg_info.linfo.link.link_status64;
1374 * Point to the properties for octeon device to which this
1375 * interface belongs.
1377 lio->oct_dev = octeon_dev;
1380 lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
1381 lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
1382 lio_init_ifnet(lio);
1383 /* 64-bit swap required on LE machines */
1384 lio_swap_8B_data(&lio->linfo.hw_addr, 1);
1385 for (j = 0; j < 6; j++)
1386 mac[j] = *((uint8_t *)(
1387 ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));
1389 ether_ifattach(ifp, mac);
1392 * By default all interfaces on a single Octeon uses the same
1395 lio->txq = lio->linfo.txpciq[0].s.q_no;
1396 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1397 if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
1398 lio->linfo.num_rxpciq)) {
1399 lio_dev_err(octeon_dev, "I/O queues creation failed\n");
1400 goto setup_nic_dev_fail;
1403 lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
1405 lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
1406 lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);
1408 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
1409 lio_dev_err(octeon_dev, "Gather list allocation failed\n");
1410 goto setup_nic_dev_fail;
1413 if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
1414 goto setup_nic_dev_fail;
1417 (if_getcapenable(ifp) & IFCAP_LRO) &&
1418 (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
1419 (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
1420 lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
1421 LIO_LROIPV4 | LIO_LROIPV6);
1423 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
1424 lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
1426 lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);
1428 if (lio_setup_rx_oom_poll_fn(ifp))
1429 goto setup_nic_dev_fail;
1431 lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
1432 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1433 lio->link_changes++;
1435 lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);
1438 * Sending command to firmware to enable Rx checksum offload
1439 * by default at the time of setup of Liquidio driver for
1442 lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
1443 LIO_CMD_RXCSUM_ENABLE);
1444 lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
1445 LIO_CMD_TXCSUM_ENABLE);
1449 if (lio_send_rss_param(lio))
1450 goto setup_nic_dev_fail;
1454 lio_set_feature(ifp, LIO_CMD_SET_FNV,
1455 LIO_CMD_FNV_ENABLE);
1457 lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);
1459 lio_free_soft_command(octeon_dev, sc);
1461 EVENTHANDLER_REGISTER(vlan_config,
1462 lio_vlan_rx_add_vid, lio,
1463 EVENTHANDLER_PRI_FIRST);
1465 EVENTHANDLER_REGISTER(vlan_unconfig,
1466 lio_vlan_rx_kill_vid, lio,
1467 EVENTHANDLER_PRI_FIRST);
1469 /* Update stats periodically */
1470 callout_init(&lio->stats_timer, 0);
1471 lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;
1473 lio_add_hw_stats(lio);
1480 lio_free_soft_command(octeon_dev, sc);
1483 lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
1484 lio_destroy_nic_device(octeon_dev, i);
1491 lio_link_info(struct lio_recv_info *recv_info, void *ptr)
1493 struct octeon_device *oct = (struct octeon_device *)ptr;
1494 struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt;
1495 union octeon_link_status *ls;
1498 lio_dev_dbg(oct, "%s Called\n", __func__);
1499 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
1500 lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1501 recv_pkt->buffer_size[0],
1502 recv_pkt->rh.r_nic_info.gmxport);
1505 gmxport = recv_pkt->rh.r_nic_info.gmxport;
1506 ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
1507 LIO_DROQ_INFO_SIZE);
1508 lio_swap_8B_data((uint64_t *)ls,
1509 (sizeof(union octeon_link_status)) >> 3);
1511 if (oct->props.gmxport == gmxport)
1512 lio_update_link_status(oct->props.ifp, ls);
1515 for (i = 0; i < recv_pkt->buffer_count; i++)
1516 lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
1518 lio_free_recv_info(recv_info);
1523 lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1526 bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1527 bus_dmamap_unload(iq->txtag, finfo->map);
1532 lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1534 struct lio_gather *g;
1535 struct octeon_device *oct;
1540 iq_no = iq->txpciq.s.q_no;
1542 lio = if_getsoftc(oct->props.ifp);
1544 mtx_lock(&lio->glist_lock[iq_no]);
1545 STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
1546 mtx_unlock(&lio->glist_lock[iq_no]);
1548 bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1549 bus_dmamap_unload(iq->txtag, finfo->map);
1554 lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
1556 struct lio_soft_command *sc = (struct lio_soft_command *)buf;
1557 struct lio_if_cfg_resp *resp;
1558 struct lio_if_cfg_context *ctx;
1560 resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1561 ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1563 oct = lio_get_device(ctx->octeon_id);
1565 lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
1566 LIO_CAST64(resp->status), status);
1569 snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
1570 resp->cfg_info.lio_firmware_version);
1573 * This barrier is required to be sure that the response has been
1574 * written fully before waking up the handler
/*
 * Compare two 6-byte Ethernet MAC addresses.
 *
 * Returns nonzero when the addresses differ, 0 when they are equal.
 * Uses memcmp() instead of the previous hand-rolled six-way byte
 * comparison; callers only test the result for truth.
 */
static int
lio_is_mac_changed(uint8_t *new, uint8_t *old)
{

	/* 6 == Ethernet address length (ETHER_ADDR_LEN). */
	return (memcmp(new, old, 6) != 0);
}
1591 struct lio *lio = arg;
1592 if_t ifp = lio->ifp;
1593 struct octeon_device *oct = lio->oct_dev;
1594 uint8_t *mac_new, mac_old[ETHER_HDR_LEN];
1597 lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);
1599 /* Ready for link status updates */
1602 lio_dev_info(oct, "Interface Open, ready for traffic\n");
1604 /* tell Octeon to start forwarding packets to host */
1605 lio_send_rx_ctrl_cmd(lio, 1);
1607 mac_new = if_getlladdr(ifp);
1608 memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN);
1610 if (lio_is_mac_changed(mac_new, mac_old)) {
1611 ret = lio_set_mac(ifp, mac_new);
1613 lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
1616 /* Now inform the stack we're ready */
1617 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1619 lio_dev_info(oct, "Interface is opened\n");
1623 lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd)
1625 struct lio_ctrl_pkt nctrl;
1626 struct lio *lio = if_getsoftc(ifp);
1627 struct octeon_device *oct = lio->oct_dev;
1630 nctrl.ncmd.cmd64 = 0;
1631 nctrl.ncmd.s.cmd = command;
1632 nctrl.ncmd.s.param1 = rx_cmd;
1633 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1634 nctrl.wait_time = 100;
1636 nctrl.cb_fn = lio_ctrl_cmd_completion;
1638 ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
1640 lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
1648 lio_stop_nic_module(struct octeon_device *oct)
1653 lio_dev_dbg(oct, "Stopping network interfaces\n");
1654 if (!oct->ifcount) {
1655 lio_dev_err(oct, "Init for Octeon was not completed\n");
1659 mtx_lock(&oct->cmd_resp_wqlock);
1660 oct->cmd_resp_state = LIO_DRV_OFFLINE;
1661 mtx_unlock(&oct->cmd_resp_wqlock);
1663 for (i = 0; i < oct->ifcount; i++) {
1664 lio = if_getsoftc(oct->props.ifp);
1665 for (j = 0; j < oct->num_oqs; j++)
1666 lio_unregister_droq_ops(oct,
1667 lio->linfo.rxpciq[j].s.q_no);
1670 callout_drain(&lio->stats_timer);
1672 for (i = 0; i < oct->ifcount; i++)
1673 lio_destroy_nic_device(oct, i);
1675 lio_dev_dbg(oct, "Network interface stopped\n");
1681 lio_delete_glists(struct octeon_device *oct, struct lio *lio)
1683 struct lio_gather *g;
1686 if (lio->glist_lock != NULL) {
1687 free((void *)lio->glist_lock, M_DEVBUF);
1688 lio->glist_lock = NULL;
1691 if (lio->ghead == NULL)
1694 for (i = 0; i < lio->linfo.num_txpciq; i++) {
1696 g = (struct lio_gather *)
1697 lio_delete_first_node(&lio->ghead[i]);
1701 if ((lio->glists_virt_base != NULL) &&
1702 (lio->glists_virt_base[i] != NULL)) {
1703 lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
1704 lio->glists_virt_base[i]);
1708 free(lio->glists_virt_base, M_DEVBUF);
1709 lio->glists_virt_base = NULL;
1711 free(lio->glists_dma_base, M_DEVBUF);
1712 lio->glists_dma_base = NULL;
1714 free(lio->ghead, M_DEVBUF);
1719 lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
1721 struct lio_gather *g;
1724 lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
1726 if (lio->glist_lock == NULL)
1729 lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
1731 if (lio->ghead == NULL) {
1732 free((void *)lio->glist_lock, M_DEVBUF);
1733 lio->glist_lock = NULL;
1737 lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
1740 * allocate memory to store virtual and dma base address of
1741 * per glist consistent memory
1743 lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
1745 lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
1747 if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
1748 lio_delete_glists(oct, lio);
1752 for (i = 0; i < num_iqs; i++) {
1753 mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);
1755 STAILQ_INIT(&lio->ghead[i]);
1757 lio->glists_virt_base[i] =
1758 lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
1759 (vm_paddr_t *)&lio->glists_dma_base[i]);
1760 if (lio->glists_virt_base[i] == NULL) {
1761 lio_delete_glists(oct, lio);
1765 for (j = 0; j < lio->tx_qsize; j++) {
1766 g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
1770 g->sg = (struct lio_sg_entry *)(uintptr_t)
1771 ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
1772 (j * lio->glist_entry_size));
1773 g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
1774 (j * lio->glist_entry_size);
1775 STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
1778 if (j != lio->tx_qsize) {
1779 lio_delete_glists(oct, lio);
1790 struct lio *lio = if_getsoftc(ifp);
1791 struct octeon_device *oct = lio->oct_dev;
1793 lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1794 if_link_state_change(ifp, LINK_STATE_DOWN);
1797 lio->linfo.link.s.link_up = 0;
1798 lio->link_changes++;
1800 lio_send_rx_ctrl_cmd(lio, 0);
1802 /* Tell the stack that the interface is no longer active */
1803 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1805 lio_dev_info(oct, "Interface is stopped\n");
1809 lio_check_rx_oom_status(struct lio *lio)
1811 struct lio_droq *droq;
1812 struct octeon_device *oct = lio->oct_dev;
1816 for (q = 0; q < oct->num_oqs; q++) {
1817 q_no = lio->linfo.rxpciq[q].s.q_no;
1818 droq = oct->droq[q_no];
1821 if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
1822 mtx_lock(&droq->lock);
1823 desc_refilled = lio_droq_refill(oct, droq);
1825 * Flush the droq descriptor data to memory to be sure
1826 * that when we update the credits the data in memory
1830 lio_write_csr32(oct, droq->pkts_credit_reg,
1832 /* make sure mmio write completes */
1833 __compiler_membar();
1834 mtx_unlock(&droq->lock);
1840 lio_poll_check_rx_oom_status(void *arg, int pending __unused)
1842 struct lio_tq *rx_status_tq = arg;
1843 struct lio *lio = rx_status_tq->ctxptr;
1845 if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
1846 lio_check_rx_oom_status(lio);
1848 taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1849 lio_ms_to_ticks(50));
1853 lio_setup_rx_oom_poll_fn(if_t ifp)
1855 struct lio *lio = if_getsoftc(ifp);
1856 struct octeon_device *oct = lio->oct_dev;
1857 struct lio_tq *rx_status_tq;
1859 rx_status_tq = &lio->rx_status_tq;
1861 rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
1862 taskqueue_thread_enqueue,
1864 if (rx_status_tq->tq == NULL) {
1865 lio_dev_err(oct, "unable to create lio rx oom status tq\n");
1869 TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
1870 lio_poll_check_rx_oom_status, (void *)rx_status_tq);
1872 rx_status_tq->ctxptr = lio;
1874 taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
1875 "lio%d_rx_oom_status",
1878 taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1879 lio_ms_to_ticks(50));
1885 lio_cleanup_rx_oom_poll_fn(if_t ifp)
1887 struct lio *lio = if_getsoftc(ifp);
1889 if (lio->rx_status_tq.tq != NULL) {
1890 while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
1891 &lio->rx_status_tq.work, NULL))
1892 taskqueue_drain_timeout(lio->rx_status_tq.tq,
1893 &lio->rx_status_tq.work);
1895 taskqueue_free(lio->rx_status_tq.tq);
1897 lio->rx_status_tq.tq = NULL;
1902 lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1904 if_t ifp = oct->props.ifp;
1908 lio_dev_err(oct, "%s No ifp ptr for index %d\n",
1913 lio = if_getsoftc(ifp);
1915 lio_ifstate_set(lio, LIO_IFSTATE_DETACH);
1917 lio_dev_dbg(oct, "NIC device cleanup\n");
1919 if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1922 if (lio_wait_for_pending_requests(oct))
1923 lio_dev_err(oct, "There were pending requests\n");
1925 if (lio_wait_for_instr_fetch(oct))
1926 lio_dev_err(oct, "IQ had pending instructions\n");
1928 if (lio_wait_for_oq_pkts(oct))
1929 lio_dev_err(oct, "OQ had pending packets\n");
1931 if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1932 ether_ifdetach(ifp);
1934 lio_tcp_lro_free(oct, ifp);
1936 lio_cleanup_rx_oom_poll_fn(ifp);
1938 lio_delete_glists(oct, lio);
1940 EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
1941 EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);
1943 free(lio, M_DEVBUF);
1947 oct->props.gmxport = -1;
1949 oct->props.ifp = NULL;
1953 print_link_info(if_t ifp)
1955 struct lio *lio = if_getsoftc(ifp);
1957 if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
1958 lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
1959 struct octeon_link_info *linfo = &lio->linfo;
1961 if (linfo->link.s.link_up) {
1962 lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
1963 linfo->link.s.speed,
1964 (linfo->link.s.duplex) ? "Full" : "Half");
1966 lio_dev_info(lio->oct_dev, "Link Down\n");
1972 lio_update_link_status(if_t ifp, union octeon_link_status *ls)
1974 struct lio *lio = if_getsoftc(ifp);
1975 int changed = (lio->linfo.link.link_status64 != ls->link_status64);
1977 lio->linfo.link.link_status64 = ls->link_status64;
1979 if ((lio->intf_open) && (changed)) {
1980 print_link_info(ifp);
1981 lio->link_changes++;
1982 if (lio->linfo.link.s.link_up)
1983 if_link_state_change(ifp, LINK_STATE_UP);
1985 if_link_state_change(ifp, LINK_STATE_DOWN);
1990 * \brief Callback for rx ctrl
1991 * @param status status of request
1992 * @param buf pointer to resp structure
1995 lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
1997 struct lio_soft_command *sc = (struct lio_soft_command *)buf;
1998 struct lio_rx_ctl_context *ctx;
2000 ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2002 oct = lio_get_device(ctx->octeon_id);
2004 lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
2005 LIO_CAST64(status));
2009 * This barrier is required to be sure that the response has been
2010 * written fully before waking up the handler
2016 lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
2018 struct lio_soft_command *sc;
2019 struct lio_rx_ctl_context *ctx;
2020 union octeon_cmd *ncmd;
2021 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2022 int ctx_size = sizeof(struct lio_rx_ctl_context);
2025 if (oct->props.rx_on == start_stop)
2028 sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
2032 ncmd = (union octeon_cmd *)sc->virtdptr;
2033 ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2036 ctx->octeon_id = lio_get_device_id(oct);
2038 ncmd->s.cmd = LIO_CMD_RX_CTL;
2039 ncmd->s.param1 = start_stop;
2041 lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));
2043 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2045 lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
2048 sc->callback = lio_rx_ctl_callback;
2049 sc->callback_arg = sc;
2050 sc->wait_time = 5000;
2052 retval = lio_send_soft_command(oct, sc);
2053 if (retval == LIO_IQ_SEND_FAILED) {
2054 lio_dev_err(oct, "Failed to send RX Control message\n");
2057 * Sleep on a wait queue till the cond flag indicates that the
2058 * response arrived or timed-out.
2060 lio_sleep_cond(oct, &ctx->cond);
2061 oct->props.rx_on = start_stop;
2064 lio_free_soft_command(oct, sc);
2068 lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid)
2070 struct lio_ctrl_pkt nctrl;
2071 struct lio *lio = if_getsoftc(ifp);
2072 struct octeon_device *oct = lio->oct_dev;
2075 if (if_getsoftc(ifp) != arg) /* Not our event */
2078 if ((vid == 0) || (vid > 4095)) /* Invalid */
2081 bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2083 nctrl.ncmd.cmd64 = 0;
2084 nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
2085 nctrl.ncmd.s.param1 = vid;
2086 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2087 nctrl.wait_time = 100;
2089 nctrl.cb_fn = lio_ctrl_cmd_completion;
2091 ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2093 lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
2099 lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid)
2101 struct lio_ctrl_pkt nctrl;
2102 struct lio *lio = if_getsoftc(ifp);
2103 struct octeon_device *oct = lio->oct_dev;
2106 if (if_getsoftc(ifp) != arg) /* Not our event */
2109 if ((vid == 0) || (vid > 4095)) /* Invalid */
2112 bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2114 nctrl.ncmd.cmd64 = 0;
2115 nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
2116 nctrl.ncmd.s.param1 = vid;
2117 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2118 nctrl.wait_time = 100;
2120 nctrl.cb_fn = lio_ctrl_cmd_completion;
2122 ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2125 "Kill VLAN filter failed in core (ret: 0x%x)\n",
2131 lio_wait_for_oq_pkts(struct octeon_device *oct)
2133 int i, pending_pkts, pkt_cnt = 0, retry = 100;
2138 for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2139 if (!(oct->io_qmask.oq & BIT_ULL(i)))
2142 pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
2144 pending_pkts += pkt_cnt;
2145 taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
2146 &oct->droq[i]->droq_task);
2151 lio_sleep_timeout(1);
2152 } while (retry-- && pending_pkts);
2158 lio_destroy_resources(struct octeon_device *oct)
2162 switch (atomic_load_acq_int(&oct->status)) {
2163 case LIO_DEV_RUNNING:
2164 case LIO_DEV_CORE_OK:
2165 /* No more instructions will be forwarded. */
2166 atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);
2168 oct->app_mode = LIO_DRV_INVALID_APP;
2169 lio_dev_dbg(oct, "Device state is now %s\n",
2170 lio_get_state_string(&oct->status));
2172 lio_sleep_timeout(100);
2175 case LIO_DEV_HOST_OK:
2178 case LIO_DEV_CONSOLE_INIT_DONE:
2179 /* Remove any consoles */
2180 lio_remove_consoles(oct);
2183 case LIO_DEV_IO_QUEUES_DONE:
2184 if (lio_wait_for_pending_requests(oct))
2185 lio_dev_err(oct, "There were pending requests\n");
2187 if (lio_wait_for_instr_fetch(oct))
2188 lio_dev_err(oct, "IQ had pending instructions\n");
2191 * Disable the input and output queues now. No more packets will
2192 * arrive from Octeon, but we should wait for all packet
2193 * processing to finish.
2195 oct->fn_list.disable_io_queues(oct);
2197 if (lio_wait_for_oq_pkts(oct))
2198 lio_dev_err(oct, "OQ had pending packets\n");
2201 case LIO_DEV_INTR_SET_DONE:
2202 /* Disable interrupts */
2203 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
2206 for (i = 0; i < oct->num_msix_irqs - 1; i++) {
2207 if (oct->ioq_vector[i].tag != NULL) {
2208 bus_teardown_intr(oct->device,
2209 oct->ioq_vector[i].msix_res,
2210 oct->ioq_vector[i].tag);
2211 oct->ioq_vector[i].tag = NULL;
2213 if (oct->ioq_vector[i].msix_res != NULL) {
2214 bus_release_resource(oct->device,
2216 oct->ioq_vector[i].vector,
2217 oct->ioq_vector[i].msix_res);
2218 oct->ioq_vector[i].msix_res = NULL;
2221 /* non-iov vector's argument is oct struct */
2222 if (oct->tag != NULL) {
2223 bus_teardown_intr(oct->device, oct->msix_res,
2228 if (oct->msix_res != NULL) {
2229 bus_release_resource(oct->device, SYS_RES_IRQ,
2232 oct->msix_res = NULL;
2235 pci_release_msi(oct->device);
2238 case LIO_DEV_IN_RESET:
2239 case LIO_DEV_DROQ_INIT_DONE:
2240 /* Wait for any pending operations */
2242 for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2243 if (!(oct->io_qmask.oq & BIT_ULL(i)))
2245 lio_delete_droq(oct, i);
2249 case LIO_DEV_RESP_LIST_INIT_DONE:
2250 for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
2251 if (oct->droq[i] != NULL) {
2252 free(oct->droq[i], M_DEVBUF);
2253 oct->droq[i] = NULL;
2256 lio_delete_response_list(oct);
2259 case LIO_DEV_INSTR_QUEUE_INIT_DONE:
2260 for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
2261 if (!(oct->io_qmask.iq & BIT_ULL(i)))
2264 lio_delete_instr_queue(oct, i);
2268 case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
2269 for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
2270 if (oct->instr_queue[i] != NULL) {
2271 free(oct->instr_queue[i], M_DEVBUF);
2272 oct->instr_queue[i] = NULL;
2275 lio_free_ioq_vector(oct);
2278 case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
2279 lio_free_sc_buffer_pool(oct);
2282 case LIO_DEV_DISPATCH_INIT_DONE:
2283 lio_delete_dispatch_list(oct);
2286 case LIO_DEV_PCI_MAP_DONE:
2287 refcount = lio_deregister_device(oct);
2289 if (fw_type_is_none())
2293 oct->fn_list.soft_reset(oct);
2295 lio_unmap_pci_barx(oct, 0);
2296 lio_unmap_pci_barx(oct, 1);
2299 case LIO_DEV_PCI_ENABLE_DONE:
2300 /* Disable the device, releasing the PCI INT */
2301 pci_disable_busmaster(oct->device);
2304 case LIO_DEV_BEGIN_STATE:
2306 } /* end switch (oct->status) */