2 * Copyright(c) 2002-2011 Exar Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification are permitted provided the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Exar Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include <dev/vxge/vxge.h>
/* Bus|slot key of the device currently being probed (-1 = none yet). */
35 static int vxge_pci_bd_no = -1;
/* Set once the copyright banner has been printed (print-once latch). */
36 static u32 vxge_drv_copyright = 0;
/* Number of devices that have initialized the HAL driver layer. */
37 static u32 vxge_dev_ref_count = 0;
/* Set when firmware upgrade requires a reboot before re-probe. */
38 static u32 vxge_dev_req_reboot = 0;
/*
 * Per-vpath-count selector table; values look like (2^k - 1) masks used
 * to fold a hash into a vpath index. NOTE(review): table meaning assumed
 * from the values — confirm against the HAL RTH/steering code.
 */
40 static int vpath_selector[VXGE_HAL_MAX_VIRTUAL_PATHS] = \
41 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
45 * Probes for x3100 devices
/*
 * Device probe method: match vendor/device id, set the device
 * description, and return BUS_PROBE_DEFAULT on a match.
 */
48 vxge_probe(device_t ndev)
53 u16 pci_vendor_id = 0;
54 u16 pci_device_id = 0;
56 char adapter_name[64];
58 pci_vendor_id = pci_get_vendor(ndev);
/* Reject anything that is not an Exar/Neterion part. */
59 if (pci_vendor_id != VXGE_PCI_VENDOR_ID)
62 pci_device_id = pci_get_device(ndev);
64 if (pci_device_id == VXGE_PCI_DEVICE_ID_TITAN_1) {
/* Combined bus|slot key identifies this physical adapter. */
66 pci_bd_no = (pci_get_bus(ndev) | pci_get_slot(ndev));
68 snprintf(adapter_name, sizeof(adapter_name),
69 VXGE_ADAPTER_NAME, pci_get_revid(ndev));
70 device_set_desc_copy(ndev, adapter_name);
/* Print the copyright banner only once per driver load. */
72 if (!vxge_drv_copyright) {
73 device_printf(ndev, VXGE_COPYRIGHT);
74 vxge_drv_copyright = 1;
/*
 * If a firmware upgrade requested a reboot, only claim the device
 * that is not the one awaiting reboot. NOTE(review): exact reboot
 * gating semantics inferred — confirm with vxge_firmware_verify().
 */
77 if (vxge_dev_req_reboot == 0) {
78 vxge_pci_bd_no = pci_bd_no;
79 err = BUS_PROBE_DEFAULT;
81 if (pci_bd_no != vxge_pci_bd_no) {
82 vxge_pci_bd_no = pci_bd_no;
83 err = BUS_PROBE_DEFAULT;
94 * Connects driver to the system if probe was success @ndev handle
/*
 * Device attach method. Ordering: softc init -> driver config ->
 * HAL driver init -> busmaster -> BAR/ISR resources -> HAL device init ->
 * firmware check -> vpath memory -> mutexes/media -> ifnet -> ISR hookup.
 * On any failure, error_level records how much must be unwound by
 * vxge_free_resources().
 */
97 vxge_attach(device_t ndev)
101 vxge_hal_device_t *hldev = NULL;
102 vxge_hal_device_attr_t device_attr;
/* Tracks the deepest allocation reached, for staged teardown on error. */
103 vxge_free_resources_e error_level = VXGE_FREE_NONE;
105 vxge_hal_status_e status = VXGE_HAL_OK;
107 /* Get per-ndev buffer */
108 vdev = (vxge_dev_t *) device_get_softc(ndev);
112 bzero(vdev, sizeof(vxge_dev_t));
115 strlcpy(vdev->ndev_name, "vxge", sizeof(vdev->ndev_name));
/* Pull tunables (hint.vxge.*) into vdev->config. */
117 err = vxge_driver_config(vdev);
121 /* Initialize HAL driver */
122 status = vxge_driver_init(vdev);
123 if (status != VXGE_HAL_OK) {
124 device_printf(vdev->ndev, "Failed to initialize driver\n");
127 /* Enable PCI bus-master */
128 pci_enable_busmaster(ndev);
130 /* Allocate resources */
131 err = vxge_alloc_resources(vdev);
133 device_printf(vdev->ndev, "resource allocation failed\n");
137 err = vxge_device_hw_info_get(vdev);
139 error_level = VXGE_FREE_BAR2;
143 /* Get firmware default values for Device Configuration */
144 vxge_hal_device_config_default_get(vdev->device_config);
146 /* Customize Device Configuration based on User request */
147 vxge_vpath_config(vdev);
149 /* Allocate ISR resources */
150 err = vxge_alloc_isr_resources(vdev);
152 error_level = VXGE_FREE_ISR_RESOURCE;
153 device_printf(vdev->ndev, "isr resource allocation failed\n");
/* Hand the mapped BARs, register handles and IRQ to the HAL. */
158 device_attr.bar0 = (u8 *) vdev->pdev->bar_info[0];
159 device_attr.bar1 = (u8 *) vdev->pdev->bar_info[1];
160 device_attr.bar2 = (u8 *) vdev->pdev->bar_info[2];
161 device_attr.regh0 = (vxge_bus_res_t *) vdev->pdev->reg_map[0];
162 device_attr.regh1 = (vxge_bus_res_t *) vdev->pdev->reg_map[1];
163 device_attr.regh2 = (vxge_bus_res_t *) vdev->pdev->reg_map[2];
164 device_attr.irqh = (pci_irq_h) vdev->config.isr_info[0].irq_handle;
165 device_attr.cfgh = vdev->pdev;
166 device_attr.pdev = vdev->pdev;
168 /* Initialize HAL Device */
169 status = vxge_hal_device_initialize((vxge_hal_device_h *) &hldev,
170 &device_attr, vdev->device_config);
171 if (status != VXGE_HAL_OK) {
172 error_level = VXGE_FREE_ISR_RESOURCE;
173 device_printf(vdev->ndev, "hal device initialization failed\n");
/* Back-pointer so HAL callbacks can recover the softc. */
178 vxge_hal_device_private_set(hldev, vdev);
/* Only the privileged (management) function may touch firmware. */
180 if (vdev->is_privilaged) {
181 err = vxge_firmware_verify(vdev);
/* Firmware was upgraded: flag that a reboot is needed before use. */
183 vxge_dev_req_reboot = 1;
184 error_level = VXGE_FREE_TERMINATE_DEVICE;
189 /* Allocate memory for vpath */
190 vdev->vpaths = (vxge_vpath_t *)
191 vxge_mem_alloc(vdev->no_of_vpath * sizeof(vxge_vpath_t));
193 if (vdev->vpaths == NULL) {
194 error_level = VXGE_FREE_TERMINATE_DEVICE;
195 device_printf(vdev->ndev, "vpath memory allocation failed\n");
199 vdev->no_of_func = 1;
200 if (vdev->is_privilaged) {
202 vxge_hal_func_mode_count(vdev->devh,
203 vdev->config.hw_info.function_mode, &vdev->no_of_func);
205 vxge_bw_priority_config(vdev);
208 /* Initialize mutexes */
209 vxge_mutex_init(vdev);
211 /* Initialize Media */
212 vxge_media_init(vdev);
/* Create and attach the ifnet; after this the stack can see us. */
214 err = vxge_ifp_setup(ndev);
216 error_level = VXGE_FREE_MEDIA;
217 device_printf(vdev->ndev, "setting up interface failed\n");
221 err = vxge_isr_setup(vdev);
223 error_level = VXGE_FREE_INTERFACE;
224 device_printf(vdev->ndev,
225 "failed to associate interrupt handler with device\n");
228 vxge_device_hw_info_print(vdev);
229 vdev->is_active = TRUE;
/* Error path: unwind everything acquired up to error_level. */
233 vxge_free_resources(ndev, error_level);
/* Driver is scheduled for removal in FreeBSD 12. */
237 gone_in_dev(ndev, 12, "vxge(4) driver");
243 * Detaches driver from the Kernel subsystem
/*
 * Device detach method: mark the device inactive and release
 * every resource acquired in vxge_attach().
 */
246 vxge_detach(device_t ndev)
250 vdev = (vxge_dev_t *) device_get_softc(ndev);
251 if (vdev->is_active) {
252 vdev->is_active = FALSE;
254 vxge_free_resources(ndev, VXGE_FREE_ALL);
262 * To shutdown device before system shutdown
/* Device shutdown method; quiesces the NIC before the system halts. */
265 vxge_shutdown(device_t ndev)
267 vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);
274 * Initialize the interface
/*
 * if_init entry point: take the driver lock and run the locked
 * initialization path.
 */
277 vxge_init(void *vdev_ptr)
279 vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;
282 vxge_init_locked(vdev);
283 VXGE_DRV_UNLOCK(vdev);
288 * Initialize the interface
/*
 * Bring the interface up with the driver lock held: open vpaths,
 * configure RTH and MTU, enable the HAL device, interrupts and each
 * vpath, then mark the ifnet IFF_DRV_RUNNING. The trailing statements
 * (intr_disable/device_disable/vpath_close) are the shared error
 * unwind path.
 */
291 vxge_init_locked(vxge_dev_t *vdev)
294 vxge_hal_device_t *hldev = vdev->devh;
295 vxge_hal_status_e status = VXGE_HAL_OK;
296 vxge_hal_vpath_h vpath_handle;
298 ifnet_t ifp = vdev->ifp;
300 /* If device is in running state, initializing is not required */
301 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
304 VXGE_DRV_LOCK_ASSERT(vdev);
307 err = vxge_vpath_open(vdev);
/* Receive-side hashing across vpaths, if the user enabled it. */
311 if (vdev->config.rth_enable) {
312 status = vxge_rth_config(vdev);
313 if (status != VXGE_HAL_OK)
317 for (i = 0; i < vdev->no_of_vpath; i++) {
318 vpath_handle = vxge_vpath_handle_get(vdev, i);
322 /* check initial mtu before enabling the device */
323 status = vxge_hal_device_mtu_check(vpath_handle, ifp->if_mtu);
324 if (status != VXGE_HAL_OK) {
325 device_printf(vdev->ndev,
326 "invalid mtu size %u specified\n", ifp->if_mtu);
330 status = vxge_hal_vpath_mtu_set(vpath_handle, ifp->if_mtu);
331 if (status != VXGE_HAL_OK) {
332 device_printf(vdev->ndev,
333 "setting mtu in device failed\n");
338 /* Enable HAL device */
339 status = vxge_hal_device_enable(hldev);
340 if (status != VXGE_HAL_OK) {
341 device_printf(vdev->ndev, "failed to enable device\n");
345 if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX)
346 vxge_msix_enable(vdev);
348 /* Checksum capability */
/* Rebuild if_hwassist from the currently enabled capabilities. */
349 ifp->if_hwassist = 0;
350 if (ifp->if_capenable & IFCAP_TXCSUM)
351 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
353 if (ifp->if_capenable & IFCAP_TSO4)
354 ifp->if_hwassist |= CSUM_TSO;
356 for (i = 0; i < vdev->no_of_vpath; i++) {
357 vpath_handle = vxge_vpath_handle_get(vdev, i);
361 /* Enabling mcast for all vpath */
362 vxge_hal_vpath_mcast_enable(vpath_handle);
364 /* Enabling bcast for all vpath */
365 status = vxge_hal_vpath_bcast_enable(vpath_handle);
366 if (status != VXGE_HAL_OK)
367 device_printf(vdev->ndev,
368 "can't enable bcast on vpath (%d)\n", i);
371 /* Enable interrupts */
372 vxge_hal_device_intr_enable(vdev->devh);
374 for (i = 0; i < vdev->no_of_vpath; i++) {
375 vpath_handle = vxge_vpath_handle_get(vdev, i);
/* Reset per-vpath software stats for this up/down cycle. */
379 bzero(&(vdev->vpaths[i].driver_stats),
380 sizeof(vxge_drv_stats_t));
381 status = vxge_hal_vpath_enable(vpath_handle);
382 if (status != VXGE_HAL_OK)
/* NOTE(review): 1s settle delay after vpath enable — purpose assumed
 * to be hardware settling; confirm against HAL documentation. */
386 vxge_os_mdelay(1000);
388 /* Device is initialized */
389 vdev->is_initialized = TRUE;
391 /* Now inform the stack we're ready */
392 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
393 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* Error unwind: disable interrupts and device, close vpaths. */
398 vxge_hal_device_intr_disable(vdev->devh);
399 vxge_hal_device_disable(hldev);
402 vxge_vpath_close(vdev);
410 * Initializes HAL driver
/*
 * One-time HAL driver initialization, reference-counted across
 * devices: register link-up/link-down/critical-error callbacks and
 * set the HAL debug level. Subsequent devices only bump the refcount.
 */
413 vxge_driver_init(vxge_dev_t *vdev)
415 vxge_hal_uld_cbs_t uld_callbacks;
416 vxge_hal_driver_config_t driver_config;
417 vxge_hal_status_e status = VXGE_HAL_OK;
419 /* Initialize HAL driver */
420 if (!vxge_dev_ref_count) {
421 bzero(&uld_callbacks, sizeof(vxge_hal_uld_cbs_t));
422 bzero(&driver_config, sizeof(vxge_hal_driver_config_t));
/* Upper-layer-driver callbacks invoked by the HAL. */
424 uld_callbacks.link_up = vxge_link_up;
425 uld_callbacks.link_down = vxge_link_down;
426 uld_callbacks.crit_err = vxge_crit_error;
427 uld_callbacks.sched_timer = NULL;
428 uld_callbacks.xpak_alarm_log = NULL;
430 status = vxge_hal_driver_initialize(&driver_config,
432 if (status != VXGE_HAL_OK) {
433 device_printf(vdev->ndev,
434 "failed to initialize driver\n");
438 vxge_hal_driver_debug_set(VXGE_TRACE);
439 vxge_dev_ref_count++;
/*
 * Read all hint.vxge.0.* tunables into vdev->config, substituting
 * defaults where the user supplied the "hardcoded" sentinel, and
 * collect per-function bandwidth/priority limits.
 */
449 vxge_driver_config(vxge_dev_t *vdev)
452 char temp_buffer[30];
454 vxge_bw_info_t bw_info;
456 VXGE_GET_PARAM("hint.vxge.0.no_of_vpath", vdev->config,
457 no_of_vpath, VXGE_DEFAULT_USER_HARDCODED);
/* Default vpath count: one per CPU. */
459 if (vdev->config.no_of_vpath == VXGE_DEFAULT_USER_HARDCODED)
460 vdev->config.no_of_vpath = mp_ncpus;
462 if (vdev->config.no_of_vpath <= 0) {
464 device_printf(vdev->ndev,
465 "Failed to load driver, \
466 invalid config : \'no_of_vpath\'\n");
470 VXGE_GET_PARAM("hint.vxge.0.intr_coalesce", vdev->config,
471 intr_coalesce, VXGE_DEFAULT_CONFIG_DISABLE);
473 VXGE_GET_PARAM("hint.vxge.0.rth_enable", vdev->config,
474 rth_enable, VXGE_DEFAULT_CONFIG_ENABLE);
476 VXGE_GET_PARAM("hint.vxge.0.rth_bkt_sz", vdev->config,
477 rth_bkt_sz, VXGE_DEFAULT_RTH_BUCKET_SIZE);
479 VXGE_GET_PARAM("hint.vxge.0.lro_enable", vdev->config,
480 lro_enable, VXGE_DEFAULT_CONFIG_ENABLE);
482 VXGE_GET_PARAM("hint.vxge.0.tso_enable", vdev->config,
483 tso_enable, VXGE_DEFAULT_CONFIG_ENABLE);
485 VXGE_GET_PARAM("hint.vxge.0.tx_steering", vdev->config,
486 tx_steering, VXGE_DEFAULT_CONFIG_DISABLE);
488 VXGE_GET_PARAM("hint.vxge.0.msix_enable", vdev->config,
489 intr_mode, VXGE_HAL_INTR_MODE_MSIX);
491 VXGE_GET_PARAM("hint.vxge.0.ifqmaxlen", vdev->config,
492 ifq_maxlen, VXGE_DEFAULT_CONFIG_IFQ_MAXLEN);
494 VXGE_GET_PARAM("hint.vxge.0.port_mode", vdev->config,
495 port_mode, VXGE_DEFAULT_CONFIG_VALUE);
497 if (vdev->config.port_mode == VXGE_DEFAULT_USER_HARDCODED)
498 vdev->config.port_mode = VXGE_DEFAULT_CONFIG_VALUE;
500 VXGE_GET_PARAM("hint.vxge.0.l2_switch", vdev->config,
501 l2_switch, VXGE_DEFAULT_CONFIG_VALUE);
503 if (vdev->config.l2_switch == VXGE_DEFAULT_USER_HARDCODED)
504 vdev->config.l2_switch = VXGE_DEFAULT_CONFIG_VALUE;
506 VXGE_GET_PARAM("hint.vxge.0.fw_upgrade", vdev->config,
507 fw_option, VXGE_FW_UPGRADE_ALL);
509 VXGE_GET_PARAM("hint.vxge.0.low_latency", vdev->config,
510 low_latency, VXGE_DEFAULT_CONFIG_DISABLE);
512 VXGE_GET_PARAM("hint.vxge.0.func_mode", vdev->config,
513 function_mode, VXGE_DEFAULT_CONFIG_VALUE);
515 if (vdev->config.function_mode == VXGE_DEFAULT_USER_HARDCODED)
516 vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE;
/* Reject function modes that are neither single- nor multi-function. */
518 if (!(is_multi_func(vdev->config.function_mode) ||
519 is_single_func(vdev->config.function_mode)))
520 vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE;
/* Per-PCI-function bandwidth limit and priority tunables. */
522 for (i = 0; i < VXGE_HAL_MAX_FUNCTIONS; i++) {
526 sprintf(temp_buffer, "hint.vxge.0.bandwidth_%d", i);
527 VXGE_GET_PARAM(temp_buffer, bw_info,
528 bandwidth, VXGE_DEFAULT_USER_HARDCODED);
530 if (bw_info.bandwidth == VXGE_DEFAULT_USER_HARDCODED)
531 bw_info.bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT;
533 sprintf(temp_buffer, "hint.vxge.0.priority_%d", i);
534 VXGE_GET_PARAM(temp_buffer, bw_info,
535 priority, VXGE_DEFAULT_USER_HARDCODED);
537 if (bw_info.priority == VXGE_DEFAULT_USER_HARDCODED)
538 bw_info.priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
540 vxge_os_memcpy(&vdev->config.bw_info[i], &bw_info,
541 sizeof(vxge_bw_info_t));
/* Take the driver lock and run the locked stop path. */
552 vxge_stop(vxge_dev_t *vdev)
555 vxge_stop_locked(vdev);
556 VXGE_DRV_UNLOCK(vdev);
561 * Common code for both stop and part of reset.
562 * disables device, interrupts and closes vpaths handle
/*
 * Bring the interface down with the driver lock held: clear running
 * flags, report link down, disable interrupts and the HAL device,
 * reset and close all vpaths.
 */
565 vxge_stop_locked(vxge_dev_t *vdev)
567 u64 adapter_status = 0;
568 vxge_hal_status_e status;
569 vxge_hal_device_t *hldev = vdev->devh;
570 ifnet_t ifp = vdev->ifp;
572 VXGE_DRV_LOCK_ASSERT(vdev);
574 /* If device is not in "Running" state, return */
575 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
578 /* Set appropriate flags */
579 vdev->is_initialized = FALSE;
580 hldev->link_state = VXGE_HAL_LINK_NONE;
581 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
582 if_link_state_change(ifp, LINK_STATE_DOWN);
584 /* Disable interrupts */
585 vxge_hal_device_intr_disable(hldev);
587 /* Disable HAL device */
588 status = vxge_hal_device_disable(hldev);
589 if (status != VXGE_HAL_OK) {
/* Disable failed: dump the adapter status register for diagnosis. */
590 vxge_hal_device_status(hldev, &adapter_status);
591 device_printf(vdev->ndev,
592 "adapter status: 0x%llx\n", adapter_status);
596 vxge_vpath_reset(vdev);
/* NOTE(review): 1s delay between vpath reset and close — assumed to
 * let in-flight DMA drain; confirm against HAL documentation. */
598 vxge_os_mdelay(1000);
601 vxge_vpath_close(vdev);
/*
 * if_start entry point (legacy single-queue transmit): always uses
 * vpath 0; skips the pass if the TX lock is contended (the holder
 * will drain the queue).
 */
605 vxge_send(ifnet_t ifp)
608 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
610 vpath = &(vdev->vpaths[0]);
612 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
613 if (VXGE_TX_TRYLOCK(vpath)) {
614 vxge_send_locked(ifp, vpath);
615 VXGE_TX_UNLOCK(vpath);
/*
 * Drain the ifnet send queue onto the given vpath with its TX lock
 * held. On a full hardware ring the packet is prepended back and
 * IFF_DRV_OACTIVE is set so the stack stops handing us packets.
 */
621 vxge_send_locked(ifnet_t ifp, vxge_vpath_t *vpath)
623 mbuf_t m_head = NULL;
624 vxge_dev_t *vdev = vpath->vdev;
626 VXGE_TX_LOCK_ASSERT(vpath);
/* Bail unless the driver is initialized and RUNNING (not OACTIVE). */
628 if ((!vdev->is_initialized) ||
629 ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
633 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
634 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
638 if (vxge_xmit(ifp, vpath, &m_head)) {
/* Ring full: requeue the packet and pause the stack. */
642 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
643 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
644 VXGE_DRV_STATS(vpath, tx_again);
647 /* Send a copy of the frame to the BPF listener */
648 ETHER_BPF_MTAP(ifp, m_head);
652 #if __FreeBSD_version >= 800000
/*
 * if_transmit entry point (multi-queue): pick a vpath by TX steering
 * or by the mbuf's flow id, then transmit directly if the TX lock is
 * free, otherwise enqueue on that vpath's buf_ring.
 */
655 vxge_mq_send(ifnet_t ifp, mbuf_t m_head)
660 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
662 if (vdev->config.tx_steering) {
663 i = vxge_vpath_get(vdev, m_head);
664 } else if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
/* Spread flows across vpaths by the RSS flow id. */
665 i = m_head->m_pkthdr.flowid % vdev->no_of_vpath;
668 vpath = &(vdev->vpaths[i]);
669 if (VXGE_TX_TRYLOCK(vpath)) {
670 err = vxge_mq_send_locked(ifp, vpath, m_head);
671 VXGE_TX_UNLOCK(vpath);
/* Lock busy: queue the packet; the lock holder drains the ring. */
673 err = drbr_enqueue(ifp, vpath->br, m_head);
/*
 * Multi-queue transmit with the vpath TX lock held: enqueue/dequeue
 * through the vpath's buf_ring, pushing each packet to the hardware
 * via vxge_xmit() and updating ifnet counters.
 */
679 vxge_mq_send_locked(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t m_head)
683 vxge_dev_t *vdev = vpath->vdev;
685 VXGE_TX_LOCK_ASSERT(vpath);
/* Not up: just park the packet on the buf_ring. */
687 if ((!vdev->is_initialized) ||
688 ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
690 err = drbr_enqueue(ifp, vpath->br, m_head);
/* NULL m_head means "drain whatever is already queued". */
693 if (m_head == NULL) {
694 next = drbr_dequeue(ifp, vpath->br);
695 } else if (drbr_needs_enqueue(ifp, vpath->br)) {
/* Preserve ordering: enqueue behind existing backlog, then drain. */
696 if ((err = drbr_enqueue(ifp, vpath->br, m_head)) != 0)
698 next = drbr_dequeue(ifp, vpath->br);
702 /* Process the queue */
703 while (next != NULL) {
704 if ((err = vxge_xmit(ifp, vpath, &next)) != 0) {
/* Ring full: requeue and pause the stack for this queue. */
708 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
709 err = drbr_enqueue(ifp, vpath->br, next);
710 VXGE_DRV_STATS(vpath, tx_again);
713 if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
714 if (next->m_flags & M_MCAST)
715 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
717 /* Send a copy of the frame to the BPF listener */
718 ETHER_BPF_MTAP(ifp, next);
719 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
722 next = drbr_dequeue(ifp, vpath->br);
/*
 * if_qflush entry point: discard every packet queued on every
 * vpath's buf_ring.
 */
730 vxge_mq_qflush(ifnet_t ifp)
736 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
738 for (i = 0; i < vdev->no_of_vpath; i++) {
739 vpath = &(vdev->vpaths[i]);
744 while ((m_head = buf_ring_dequeue_sc(vpath->br)) != NULL)
745 vxge_free_packet(m_head);
747 VXGE_TX_UNLOCK(vpath);
/*
 * Map one packet for DMA and post it to the vpath's TX fifo.
 * Returns non-zero when the descriptor could not be posted (caller
 * requeues the mbuf). May replace *m_headp if the mbuf chain is
 * coalesced during DMA mapping.
 */
754 vxge_xmit(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t *m_headp)
756 int err, num_segs = 0;
757 u32 txdl_avail, dma_index, tagged = 0;
760 bus_size_t dma_sizes;
763 vxge_txdl_priv_t *txdl_priv;
764 vxge_hal_txdl_h txdlh;
765 vxge_hal_status_e status;
766 vxge_dev_t *vdev = vpath->vdev;
768 VXGE_DRV_STATS(vpath, tx_xmit);
/* Back-pressure: refuse when free descriptors fall below threshold. */
770 txdl_avail = vxge_hal_fifo_free_txdl_count_get(vpath->handle);
771 if (txdl_avail < VXGE_TX_LOW_THRESHOLD) {
773 VXGE_DRV_STATS(vpath, tx_low_dtr_cnt);
778 /* Reserve descriptors */
779 status = vxge_hal_fifo_txdl_reserve(vpath->handle, &txdlh, &dtr_priv);
780 if (status != VXGE_HAL_OK) {
781 VXGE_DRV_STATS(vpath, tx_reserve_failed);
786 /* Update Tx private structure for this descriptor */
787 txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
790 * Map the packet for DMA.
791 * Returns number of segments through num_segs.
793 err = vxge_dma_mbuf_coalesce(vpath->dma_tag_tx, txdl_priv->dma_map,
794 m_headp, txdl_priv->dma_buffers, &num_segs);
/* Track the high-water mark of DMA segments per packet. */
796 if (vpath->driver_stats.tx_max_frags < num_segs)
797 vpath->driver_stats.tx_max_frags = num_segs;
/* Mapping failed: release the reserved descriptor (and the mbuf on
 * a hard error) before bailing out. */
800 VXGE_DRV_STATS(vpath, tx_no_dma_setup);
801 vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
803 } else if (err != 0) {
804 vxge_free_packet(*m_headp);
805 VXGE_DRV_STATS(vpath, tx_no_dma_setup);
806 vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
810 txdl_priv->mbuf_pkt = *m_headp;
812 /* Set VLAN tag in descriptor only if this packet has it */
813 if ((*m_headp)->m_flags & M_VLANTAG)
814 vxge_hal_fifo_txdl_vlan_set(txdlh,
815 (*m_headp)->m_pkthdr.ether_vtag);
817 /* Set descriptor buffer for header and each fragment/segment */
818 for (dma_index = 0; dma_index < num_segs; dma_index++) {
820 dma_sizes = txdl_priv->dma_buffers[dma_index].ds_len;
821 dma_addr = htole64(txdl_priv->dma_buffers[dma_index].ds_addr);
823 vxge_hal_fifo_txdl_buffer_set(vpath->handle, txdlh, dma_index,
824 dma_addr, dma_sizes);
827 /* Pre-write Sync of mapping */
828 bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map,
829 BUS_DMASYNC_PREWRITE);
/* TSO: tell the fifo the segment size so hardware can split. */
831 if ((*m_headp)->m_pkthdr.csum_flags & CSUM_TSO) {
832 if ((*m_headp)->m_pkthdr.tso_segsz) {
833 VXGE_DRV_STATS(vpath, tx_tso);
834 vxge_hal_fifo_txdl_lso_set(txdlh,
835 VXGE_HAL_FIFO_LSO_FRM_ENCAP_AUTO,
836 (*m_headp)->m_pkthdr.tso_segsz);
/* Checksum offload bits for IPv4/TCP/UDP when any offload is on. */
841 if (ifp->if_hwassist > 0) {
842 vxge_hal_fifo_txdl_cksum_set_bits(txdlh,
843 VXGE_HAL_FIFO_TXD_TX_CKO_IPV4_EN |
844 VXGE_HAL_FIFO_TXD_TX_CKO_TCP_EN |
845 VXGE_HAL_FIFO_TXD_TX_CKO_UDP_EN);
/* NOTE(review): Titan-1A with fw >= 1.8.0 appears to use tagged
 * posting — the branch body that sets `tagged` is not visible here;
 * confirm against the full source. */
848 if ((vxge_hal_device_check_id(vdev->devh) == VXGE_HAL_CARD_TITAN_1A) &&
849 (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)))
852 vxge_hal_fifo_txdl_post(vpath->handle, txdlh, tagged);
853 VXGE_DRV_STATS(vpath, tx_posted);
861 * Allocate buffers and set them into descriptors for later use
/*
 * HAL TX descriptor-init callback: create the per-descriptor DMA map.
 * Returns VXGE_HAL_OK on success, VXGE_HAL_FAIL otherwise.
 */
865 vxge_tx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
866 void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
870 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
871 vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
873 err = bus_dmamap_create(vpath->dma_tag_tx, BUS_DMA_NOWAIT,
874 &txdl_priv->dma_map);
876 return ((err == 0) ? VXGE_HAL_OK : VXGE_HAL_FAIL);
881 * If the interrupt is due to Tx completion, free the sent buffer
/*
 * HAL TX completion callback: for each completed descriptor, account
 * errors/packets, unload the DMA map, free the sent mbuf and return
 * the descriptor to the fifo; finally clear OACTIVE so the stack may
 * transmit again.
 */
884 vxge_tx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
885 void *dtr_priv, vxge_hal_fifo_tcode_e t_code, void *userdata)
887 vxge_hal_status_e status = VXGE_HAL_OK;
889 vxge_txdl_priv_t *txdl_priv;
890 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
891 vxge_dev_t *vdev = vpath->vdev;
893 ifnet_t ifp = vdev->ifp;
898 * For each completed descriptor
899 * Get private structure, free buffer, do unmapping, and free descriptor
903 VXGE_DRV_STATS(vpath, tx_compl);
904 if (t_code != VXGE_HAL_FIFO_T_CODE_OK) {
905 device_printf(vdev->ndev, "tx transfer code %d\n",
908 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
909 VXGE_DRV_STATS(vpath, tx_tcode);
/* Let the HAL recover the vpath from the error transfer code. */
910 vxge_hal_fifo_handle_tcode(vpath_handle, txdlh, t_code);
912 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
913 txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
915 bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);
917 vxge_free_packet(txdl_priv->mbuf_pkt);
918 vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
920 } while (vxge_hal_fifo_txdl_next_completed(vpath_handle, &txdlh,
921 &dtr_priv, &t_code) == VXGE_HAL_OK);
/* Descriptors freed: allow the stack to hand us packets again. */
924 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
925 VXGE_TX_UNLOCK(vpath);
/*
 * HAL TX descriptor-terminate callback (vpath close/reset): for
 * still-posted descriptors, sync/unload/destroy the DMA map, free
 * the in-flight mbuf and release the descriptor.
 */
932 vxge_tx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
933 void *dtr_priv, vxge_hal_txdl_state_e state,
934 void *userdata, vxge_hal_reopen_e reopen)
936 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
937 vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
/* Only posted descriptors hold resources worth reclaiming. */
939 if (state != VXGE_HAL_TXDL_STATE_POSTED)
942 if (txdl_priv != NULL) {
943 bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map,
944 BUS_DMASYNC_POSTWRITE);
946 bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);
947 bus_dmamap_destroy(vpath->dma_tag_tx, txdl_priv->dma_map);
948 vxge_free_packet(txdl_priv->mbuf_pkt);
951 /* Free the descriptor */
952 vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
957 * Allocate buffers and set them into descriptors for later use
/*
 * HAL RX descriptor-init callback: create the per-descriptor DMA map
 * and attach a freshly allocated receive mbuf to the descriptor.
 */
961 vxge_rx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
962 void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
965 vxge_hal_status_e status = VXGE_HAL_OK;
967 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
968 vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
970 /* Create DMA map for these descriptors */
971 err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT,
/* Buffer attach failed: tear the map back down and report failure. */
974 if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) {
975 bus_dmamap_destroy(vpath->dma_tag_rx,
977 status = VXGE_HAL_FAIL;
/*
 * HAL RX completion callback: for each completed descriptor, detach
 * the filled mbuf for delivery to the stack, attach a fresh buffer,
 * repost the descriptor, set offloaded checksum/VLAN/RSS metadata and
 * hand the packet to LRO or if_input.
 */
988 vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
989 void *dtr_priv, u8 t_code, void *userdata)
993 vxge_rxd_priv_t *rxd_priv;
994 vxge_hal_ring_rxd_info_t ext_info;
995 vxge_hal_status_e status = VXGE_HAL_OK;
997 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
998 vxge_dev_t *vdev = vpath->vdev;
1000 struct lro_ctrl *lro = &vpath->lro;
1002 /* get the interface pointer */
1003 ifnet_t ifp = vdev->ifp;
/* Interface going down: just repost the descriptor and stop. */
1006 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1007 vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1008 status = VXGE_HAL_FAIL;
1012 VXGE_DRV_STATS(vpath, rx_compl);
1013 rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1015 /* Gets details of mbuf i.e., packet length */
1016 vxge_rx_rxd_1b_get(vpath, rxdh, dtr_priv);
1019 * Prepare one buffer to send it to upper layer Since upper
1020 * layer frees the buffer do not use rxd_priv->mbuf_pkt.
1021 * Meanwhile prepare a new buffer, do mapping, use with the
1022 * current descriptor and post descriptor back to ring vpath
1024 mbuf_up = rxd_priv->mbuf_pkt;
1025 if (t_code != VXGE_HAL_RING_RXD_T_CODE_OK) {
1027 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1028 VXGE_DRV_STATS(vpath, rx_tcode);
1029 status = vxge_hal_ring_handle_tcode(vpath_handle,
1033 * If transfer code is not for unknown protocols and
1034 * vxge_hal_device_handle_tcode is NOT returned
1036 * drop this packet and increment rx_tcode stats
1038 if ((status != VXGE_HAL_OK) &&
1039 (t_code != VXGE_HAL_RING_T_CODE_L3_PKT_ERR)) {
1041 vxge_free_packet(mbuf_up);
1042 vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1047 if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) {
1049 * If unable to allocate buffer, post descriptor back
1050 * to vpath for future processing of same packet.
1052 vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1056 /* Get the extended information */
1057 vxge_hal_ring_rxd_1b_info_get(vpath_handle, rxdh, &ext_info);
1059 /* post descriptor with newly allocated mbuf back to vpath */
1060 vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1061 vpath->rxd_posted++;
/* Batch doorbell writes: ring once per VXGE_RXD_REPLENISH_COUNT posts. */
1063 if (vpath->rxd_posted % VXGE_RXD_REPLENISH_COUNT == 0)
1064 vxge_hal_ring_rxd_post_post_db(vpath_handle);
1067 * Set successfully computed checksums in the mbuf.
1068 * Leave the rest to the stack to be reverified.
1070 vxge_rx_checksum(ext_info, mbuf_up);
1072 #if __FreeBSD_version >= 800000
/* Tag the packet with its vpath so the stack keeps flow affinity. */
1073 M_HASHTYPE_SET(mbuf_up, M_HASHTYPE_OPAQUE);
1074 mbuf_up->m_pkthdr.flowid = vpath->vp_index;
1076 /* Post-Read sync for buffers */
1077 bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1078 BUS_DMASYNC_POSTREAD);
1080 vxge_rx_input(ifp, mbuf_up, vpath);
1082 } while (vxge_hal_ring_rxd_next_completed(vpath_handle, &rxdh,
1083 &dtr_priv, &t_code) == VXGE_HAL_OK);
1085 /* Flush any outstanding LRO work */
1086 if (vpath->lro_enable && vpath->lro.lro_cnt)
1087 tcp_lro_flush_all(lro);
/*
 * Deliver one received packet: try LRO aggregation first when enabled,
 * otherwise (or when LRO rejects it) pass it straight to if_input.
 */
1093 vxge_rx_input(ifnet_t ifp, mbuf_t mbuf_up, vxge_vpath_t *vpath)
1095 if (vpath->lro_enable && vpath->lro.lro_cnt) {
1096 if (tcp_lro_rx(&vpath->lro, mbuf_up, 0) == 0)
1099 (*ifp->if_input) (ifp, mbuf_up);
/*
 * Translate hardware receive-offload results into mbuf metadata:
 * mark L3/L4 checksums valid for non-fragmented TCP/UDP packets and
 * copy the stripped VLAN tag into the packet header.
 */
1103 vxge_rx_checksum(vxge_hal_ring_rxd_info_t ext_info, mbuf_t mbuf_up)
1106 if (!(ext_info.proto & VXGE_HAL_FRAME_PROTO_IP_FRAG) &&
1107 (ext_info.proto & VXGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
1108 ext_info.l3_cksum_valid && ext_info.l4_cksum_valid) {
/* 0xffff = "checksum verified" sentinel expected by the stack. */
1110 mbuf_up->m_pkthdr.csum_data = htons(0xffff);
1112 mbuf_up->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1113 mbuf_up->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1114 mbuf_up->m_pkthdr.csum_flags |=
1115 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* Hardware-stripped VLAN tag, if present. */
1119 if (ext_info.vlan) {
1120 mbuf_up->m_pkthdr.ether_vtag = ext_info.vlan;
1121 mbuf_up->m_flags |= M_VLANTAG;
1127 * vxge_rx_term During unload terminate and free all descriptors
1128 * @vpath_handle Rx vpath Handle @rxdh Rx Descriptor Handle @state Descriptor
1129 * State @userdata Per-adapter Data @reopen vpath open/reopen option
/*
 * HAL RX descriptor-terminate callback (vpath close/reset): for
 * still-posted descriptors, sync/unload/destroy the DMA map, free the
 * attached mbuf and release the descriptor.
 */
1133 vxge_rx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
1134 void *dtr_priv, vxge_hal_rxd_state_e state, void *userdata,
1135 vxge_hal_reopen_e reopen)
1137 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
1138 vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1140 if (state != VXGE_HAL_RXD_STATE_POSTED)
1143 if (rxd_priv != NULL) {
1144 bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1145 BUS_DMASYNC_POSTREAD);
1146 bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
1147 bus_dmamap_destroy(vpath->dma_tag_rx, rxd_priv->dma_map);
1149 vxge_free_packet(rxd_priv->mbuf_pkt);
1151 /* Free the descriptor */
1152 vxge_hal_ring_rxd_free(vpath_handle, rxdh);
1156 * vxge_rx_rxd_1b_get
1157 * Get descriptors of packet to send up
/*
 * Read back the DMA address and received length from a completed
 * 1-buffer RX descriptor and set the mbuf's lengths accordingly.
 */
1160 vxge_rx_rxd_1b_get(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
1162 vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1163 mbuf_t mbuf_up = rxd_priv->mbuf_pkt;
1165 /* Retrieve data from completed descriptor */
1166 vxge_hal_ring_rxd_1b_get(vpath->handle, rxdh, &rxd_priv->dma_addr[0],
1167 (u32 *) &rxd_priv->dma_sizes[0]);
1169 /* Update newly created buffer to be sent up with packet length */
1170 mbuf_up->m_len = rxd_priv->dma_sizes[0];
1171 mbuf_up->m_pkthdr.len = rxd_priv->dma_sizes[0];
1172 mbuf_up->m_next = NULL;
1176 * vxge_rx_rxd_1b_set
1177 * Allocates new mbufs to be placed into descriptors
/*
 * Attach a fresh jumbo-cluster mbuf to an RX descriptor: allocate,
 * DMA-map via the vpath's spare map, then swap the spare map with the
 * descriptor's map so the old (still-mapped) buffer can be delivered
 * upstream while the new one is posted.
 */
1180 vxge_rx_rxd_1b_set(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
1182 int num_segs, err = 0;
1185 bus_dmamap_t dma_map;
1186 bus_dma_segment_t dma_buffers[1];
1187 vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1189 vxge_dev_t *vdev = vpath->vdev;
1191 mbuf_pkt = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, vdev->rx_mbuf_sz);
1194 VXGE_DRV_STATS(vpath, rx_no_buf);
1195 device_printf(vdev->ndev, "out of memory to allocate mbuf\n");
1199 /* Update mbuf's length, packet length and receive interface */
1200 mbuf_pkt->m_len = vdev->rx_mbuf_sz;
1201 mbuf_pkt->m_pkthdr.len = vdev->rx_mbuf_sz;
1202 mbuf_pkt->m_pkthdr.rcvif = vdev->ifp;
/* Map the new buffer using the vpath's spare ("extra") DMA map. */
1205 err = vxge_dma_mbuf_coalesce(vpath->dma_tag_rx, vpath->extra_dma_map,
1206 &mbuf_pkt, dma_buffers, &num_segs);
1208 VXGE_DRV_STATS(vpath, rx_map_fail);
1209 vxge_free_packet(mbuf_pkt);
1213 /* Unload DMA map of mbuf in current descriptor */
1214 bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1215 BUS_DMASYNC_POSTREAD);
1216 bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
1218 /* Update descriptor private data */
/* Swap maps: descriptor takes the spare, old map becomes the spare. */
1219 dma_map = rxd_priv->dma_map;
1220 rxd_priv->mbuf_pkt = mbuf_pkt;
1221 rxd_priv->dma_addr[0] = htole64(dma_buffers->ds_addr);
1222 rxd_priv->dma_map = vpath->extra_dma_map;
1223 vpath->extra_dma_map = dma_map;
1225 /* Pre-Read/Write sync */
1226 bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1227 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1229 /* Set descriptor buffer */
1230 vxge_hal_ring_rxd_1b_set(rxdh, rxd_priv->dma_addr[0], vdev->rx_mbuf_sz);
1238 * Callback for Link-up indication from HAL
/*
 * HAL link-up callback: re-arm per-vpath interrupt coalescing (MSI-X
 * only), refresh the active port on dual-port privileged functions,
 * and report LINK_STATE_UP to the stack.
 */
1242 vxge_link_up(vxge_hal_device_h devh, void *userdata)
1245 vxge_vpath_t *vpath;
1246 vxge_hal_device_hw_info_t *hw_info;
1248 vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1249 hw_info = &vdev->config.hw_info;
1251 ifnet_t ifp = vdev->ifp;
1253 if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1254 for (i = 0; i < vdev->no_of_vpath; i++) {
1255 vpath = &(vdev->vpaths[i]);
/* CI = continuous interrupt assist on TX/RX indications. */
1256 vxge_hal_vpath_tti_ci_set(vpath->handle);
1257 vxge_hal_vpath_rti_ci_set(vpath->handle);
1261 if (vdev->is_privilaged && (hw_info->ports > 1)) {
1262 vxge_active_port_update(vdev);
1263 device_printf(vdev->ndev,
1264 "Active Port : %lld\n", vdev->active_port);
1267 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1268 if_link_state_change(ifp, LINK_STATE_UP);
1273 * Callback for Link-down indication from HAL
/*
 * HAL link-down callback: disarm per-vpath interrupt coalescing
 * (MSI-X only), mark the queue inactive and report LINK_STATE_DOWN.
 */
1277 vxge_link_down(vxge_hal_device_h devh, void *userdata)
1280 vxge_vpath_t *vpath;
1281 vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1283 ifnet_t ifp = vdev->ifp;
1285 if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1286 for (i = 0; i < vdev->no_of_vpath; i++) {
1287 vpath = &(vdev->vpaths[i]);
1288 vxge_hal_vpath_tti_ci_reset(vpath->handle);
1289 vxge_hal_vpath_rti_ci_reset(vpath->handle);
1293 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1294 if_link_state_change(ifp, LINK_STATE_DOWN);
/*
 * Reset the interface by running a full stop/init cycle under the
 * driver lock. No-op if the device was never initialized.
 */
1301 vxge_reset(vxge_dev_t *vdev)
1303 if (!vdev->is_initialized)
1306 VXGE_DRV_LOCK(vdev);
1307 vxge_stop_locked(vdev);
1308 vxge_init_locked(vdev);
1309 VXGE_DRV_UNLOCK(vdev);
1314 * Callback for Critical error indication from HAL
/*
 * HAL critical-error callback: on serious/fifo/critical events,
 * disable interrupts, stop the queue and report link down. Recovery
 * (if any) is handled elsewhere.
 */
1318 vxge_crit_error(vxge_hal_device_h devh, void *userdata,
1319 vxge_hal_event_e type, u64 serr_data)
1321 vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1322 ifnet_t ifp = vdev->ifp;
1325 case VXGE_HAL_EVENT_SERR:
1326 case VXGE_HAL_EVENT_KDFCCTL:
1327 case VXGE_HAL_EVENT_CRITICAL:
1328 vxge_hal_device_intr_disable(vdev->devh);
1329 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1330 if_link_state_change(ifp, LINK_STATE_DOWN);
/*
 * Map configured vpaths onto hardware vpath ids (from the hw_info
 * vpath mask), copy each vpath's MAC address, then allocate and
 * configure the ifnet (callbacks, capabilities, queue lengths) and
 * attach it to the ethernet layer with vpath 0's MAC.
 */
1341 vxge_ifp_setup(device_t ndev)
1346 vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);
/* Walk the hardware vpath mask; j indexes our configured vpaths. */
1348 for (i = 0, j = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
1349 if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
1352 if (j >= vdev->no_of_vpath)
1355 vdev->vpaths[j].vp_id = i;
1356 vdev->vpaths[j].vp_index = j;
1357 vdev->vpaths[j].vdev = vdev;
1358 vdev->vpaths[j].is_configured = TRUE;
1360 vxge_os_memcpy((u8 *) vdev->vpaths[j].mac_addr,
1361 (u8 *) (vdev->config.hw_info.mac_addrs[i]),
1362 (size_t) ETHER_ADDR_LEN);
1366 /* Get interface ifnet structure for this Ether device */
1367 ifp = if_alloc(IFT_ETHER);
1369 device_printf(vdev->ndev,
1370 "memory allocation for ifnet failed\n");
1376 /* Initialize interface ifnet structure */
1377 if_initname(ifp, device_get_name(ndev), device_get_unit(ndev));
1379 ifp->if_baudrate = VXGE_BAUDRATE;
1380 ifp->if_init = vxge_init;
1381 ifp->if_softc = vdev;
1382 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1383 ifp->if_ioctl = vxge_ioctl;
1384 ifp->if_start = vxge_send;
1386 #if __FreeBSD_version >= 800000
/* Multiqueue transmit path on FreeBSD 8+. */
1387 ifp->if_transmit = vxge_mq_send;
1388 ifp->if_qflush = vxge_mq_qflush;
1390 ifp->if_snd.ifq_drv_maxlen = max(vdev->config.ifq_maxlen, ifqmaxlen);
1391 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1392 /* IFQ_SET_READY(&ifp->if_snd); */
1394 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1396 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
1397 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1398 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1400 if (vdev->config.tso_enable)
1401 vxge_tso_config(vdev);
1403 if (vdev->config.lro_enable)
1404 ifp->if_capabilities |= IFCAP_LRO;
/* Enable everything we advertise by default. */
1406 ifp->if_capenable = ifp->if_capabilities;
1408 strlcpy(vdev->ndev_name, device_get_nameunit(ndev),
1409 sizeof(vdev->ndev_name));
1411 /* Attach the interface */
1412 ether_ifattach(ifp, vdev->vpaths[0].mac_addr);
1420 * Register isr functions
/*
 * vxge_isr_setup
 * Register interrupt handlers with the bus.  INTA mode installs a single
 * filtered handler (vxge_isr_filter + vxge_isr_line).  MSI-X mode installs
 * one handler per allocated vector: rids with (rid % 4 == 2) are per-vpath
 * traffic vectors, (rid % 4 == 3) are alarm vectors.  On failure, handlers
 * set so far are torn down again.
 */
1423 vxge_isr_setup(vxge_dev_t *vdev)
1425 	int i, irq_rid, err = 0;
1426 	vxge_vpath_t *vpath;
1429 	void (*isr_func_ptr) (void *);
1431 	switch (vdev->config.intr_mode) {
1432 	case VXGE_HAL_INTR_MODE_IRQLINE:
1433 		err = bus_setup_intr(vdev->ndev,
1434 		    vdev->config.isr_info[0].irq_res,
1435 		    (INTR_TYPE_NET | INTR_MPSAFE),
1436 		    vxge_isr_filter, vxge_isr_line, vdev,
1437 		    &vdev->config.isr_info[0].irq_handle);
1440 	case VXGE_HAL_INTR_MODE_MSIX:
1441 		for (i = 0; i < vdev->intr_count; i++) {
1443 			irq_rid = vdev->config.isr_info[i].irq_rid;
			/* rid / 4 recovers the owning vpath index. */
1444 			vpath = &vdev->vpaths[irq_rid / 4];
1446 			if ((irq_rid % 4) == 2) {
1447 				isr_func_ptr = vxge_isr_msix;
1448 				isr_func_arg = (void *) vpath;
1449 			} else if ((irq_rid % 4) == 3) {
1450 				isr_func_ptr = vxge_isr_msix_alarm;
1451 				isr_func_arg = (void *) vpath;
1455 			err = bus_setup_intr(vdev->ndev,
1456 			    vdev->config.isr_info[i].irq_res,
1457 			    (INTR_TYPE_NET | INTR_MPSAFE), NULL,
1458 			    (void *) isr_func_ptr, (void *) isr_func_arg,
1459 			    &vdev->config.isr_info[i].irq_handle);
1465 	/* Teardown interrupt handler */
1467 	bus_teardown_intr(vdev->ndev,
1468 	    vdev->config.isr_info[i].irq_res,
1469 	    vdev->config.isr_info[i].irq_handle);
1479 * ISR filter function - filter interrupts from other shared devices
/*
 * vxge_isr_filter
 * Fast interrupt filter for shared INTA lines: reads the device's general
 * interrupt status register and schedules the ithread only if this device
 * actually asserted the interrupt; otherwise declares it stray.
 */
1482 vxge_isr_filter(void *handle)
1485 	vxge_dev_t *vdev = (vxge_dev_t *) handle;
1486 	__hal_device_t *hldev = (__hal_device_t *) vdev->devh;
1488 	vxge_hal_common_reg_t *common_reg =
1489 	    (vxge_hal_common_reg_t *) (hldev->common_reg);
1491 	val64 = vxge_os_pio_mem_read64(vdev->pdev, (vdev->devh)->regh0,
1492 	    &common_reg->titan_general_int_status);
1494 	return ((val64) ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
1499 * Interrupt service routine for Line interrupts
/*
 * vxge_isr_line
 * Threaded handler for INTA mode: delegates all work to the HAL IRQ
 * dispatcher.
 */
1502 vxge_isr_line(void *vdev_ptr)
1504 	vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;
1506 	vxge_hal_device_handle_irq(vdev->devh, 0);
/*
 * vxge_isr_msix
 * Per-vpath MSI-X traffic handler: mask the vector, poll receive
 * completions, poll transmit completions (with interrupt coalescing)
 * when the vpath's FIFO is enabled, then unmask the vector again.
 */
1510 vxge_isr_msix(void *vpath_ptr)
1515 	__hal_virtualpath_t *hal_vpath;
1516 	vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr;
1517 	vxge_dev_t *vdev = vpath->vdev;
1518 	hal_vpath = ((__hal_vpath_handle_t *) vpath->handle)->vpath;
1520 	VXGE_DRV_STATS(vpath, isr_msix);
1521 	VXGE_HAL_DEVICE_STATS_SW_INFO_TRAFFIC_INTR(vdev->devh);
	/* Keep the vector masked while servicing to avoid re-entry. */
1523 	vxge_hal_vpath_mf_msix_mask(vpath->handle, vpath->msix_vec);
1526 	vxge_hal_vpath_poll_rx(vpath->handle, &got_rx);
1529 	if (hal_vpath->vp_config->fifo.enable) {
1530 		vxge_intr_coalesce_tx(vpath);
1531 		vxge_hal_vpath_poll_tx(vpath->handle, &got_tx);
1534 	vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec);
/*
 * vxge_isr_msix_alarm
 * MSI-X alarm-vector handler: for every configured vpath, mask the alarm
 * vector, let the HAL process pending alarms and unmask again.  If the HAL
 * reports an unrecoverable event (slot freeze or serious error) the driver
 * is flagged as no longer initialized so further processing stops.
 */
1538 vxge_isr_msix_alarm(void *vpath_ptr)
1541 	vxge_hal_status_e status = VXGE_HAL_OK;
1543 	vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr;
1544 	vxge_dev_t *vdev = vpath->vdev;
1546 	VXGE_HAL_DEVICE_STATS_SW_INFO_NOT_TRAFFIC_INTR(vdev->devh);
1548 	/* Process alarms in each vpath */
1549 	for (i = 0; i < vdev->no_of_vpath; i++) {
1551 		vpath = &(vdev->vpaths[i]);
1552 		vxge_hal_vpath_mf_msix_mask(vpath->handle,
1553 		    vpath->msix_vec_alarm);
1554 		status = vxge_hal_vpath_alarm_process(vpath->handle, 0);
1555 		if ((status == VXGE_HAL_ERR_EVENT_SLOT_FREEZE) ||
1556 		    (status == VXGE_HAL_ERR_EVENT_SERR)) {
1557 			device_printf(vdev->ndev,
			/* Fixed typo: was "urecoverable". */
1558 			    "processing alarms unrecoverable error %x\n",
1561 			/* Stop the driver */
1562 			vdev->is_initialized = FALSE;
1565 		vxge_hal_vpath_mf_msix_unmask(vpath->handle,
1566 		    vpath->msix_vec_alarm);
/*
 * vxge_msix_enable
 * Program and unmask the MSI-X vectors for every vpath before interrupts
 * are enabled.  Each vpath gets a traffic vector (vp_id * ACTIVE + 1) and
 * shares a single alarm vector derived from the first vpath's id.
 */
1574 vxge_msix_enable(vxge_dev_t *vdev)
1576 	int i, first_vp_id, msix_id;
1578 	vxge_vpath_t *vpath;
1579 	vxge_hal_status_e status = VXGE_HAL_OK;
1582 	 * Unmasking and Setting MSIX vectors before enabling interrupts
1583 	 * tim[] : 0 - Tx ## 1 - Rx ## 2 - UMQ-DMQ ## 0 - BITMAP
1585 	int tim[4] = {0, 1, 0, 0};
1587 	for (i = 0; i < vdev->no_of_vpath; i++) {
1589 		vpath = vdev->vpaths + i;
1590 		first_vp_id = vdev->vpaths[0].vp_id;
1592 		msix_id = vpath->vp_id * VXGE_HAL_VPATH_MSIX_ACTIVE;
1593 		tim[1] = vpath->msix_vec = msix_id + 1;
		/* All vpaths share the first vpath's alarm vector. */
1595 		vpath->msix_vec_alarm = first_vp_id *
1596 		    VXGE_HAL_VPATH_MSIX_ACTIVE + VXGE_HAL_VPATH_MSIX_ALARM_ID;
1598 		status = vxge_hal_vpath_mf_msix_set(vpath->handle,
1599 		    tim, VXGE_HAL_VPATH_MSIX_ALARM_ID);
1601 		if (status != VXGE_HAL_OK) {
1602 			device_printf(vdev->ndev,
1603 			    "failed to set msix vectors to vpath\n");
1607 		vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec);
1608 		vxge_hal_vpath_mf_msix_unmask(vpath->handle,
1609 		    vpath->msix_vec_alarm);
1617 * Initializes, adds and sets media
/*
 * vxge_media_init
 * Initialize the ifmedia structure, add the fixed full-duplex optics
 * entry plus autoselect, and make autoselect the current setting.
 */
1620 vxge_media_init(vxge_dev_t *vdev)
1622 	ifmedia_init(&vdev->media,
1623 	    IFM_IMASK, vxge_media_change, vxge_media_status);
1625 	/* Add supported media */
1626 	ifmedia_add(&vdev->media,
1627 	    IFM_ETHER | vdev->ifm_optics | IFM_FDX,
1631 	ifmedia_add(&vdev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1632 	ifmedia_set(&vdev->media, IFM_ETHER | IFM_AUTO);
1637 * Callback for interface media settings
/*
 * vxge_media_status
 * ifmedia status callback: report valid status and, when the HAL sees
 * the link up, mark the media active and propagate link-up to the stack.
 */
1640 vxge_media_status(ifnet_t ifp, struct ifmediareq *ifmr)
1642 	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
1643 	vxge_hal_device_t *hldev = vdev->devh;
1645 	ifmr->ifm_status = IFM_AVALID;
1646 	ifmr->ifm_active = IFM_ETHER;
1648 	/* set link state */
1649 	if (vxge_hal_device_link_state_get(hldev) == VXGE_HAL_LINK_UP) {
1650 		ifmr->ifm_status |= IFM_ACTIVE;
1651 		ifmr->ifm_active |= vdev->ifm_optics | IFM_FDX;
1652 		if_link_state_change(ifp, LINK_STATE_UP);
1658 * Media change driver callback
/*
 * vxge_media_change
 * ifmedia change callback: only Ethernet media is supported, so any
 * other requested type is rejected with EINVAL; nothing is reprogrammed.
 */
1661 vxge_media_change(ifnet_t ifp)
1663 	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
1664 	struct ifmedia *ifmediap = &vdev->media;
1666 	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER ? EINVAL : 0);
1670 * Allocate PCI resources
/*
 * vxge_alloc_resources
 * Allocate the HAL device-config buffer, the PCI info structure and the
 * three memory BARs (0, 1, 2).  On any failure, error_level records how
 * far allocation got so vxge_free_resources() can unwind exactly that
 * much.
 */
1673 vxge_alloc_resources(vxge_dev_t *vdev)
1676 	vxge_pci_info_t *pci_info = NULL;
1677 	vxge_free_resources_e error_level = VXGE_FREE_NONE;
1679 	device_t ndev = vdev->ndev;
1681 	/* Allocate Buffer for HAL Device Configuration */
1682 	vdev->device_config = (vxge_hal_device_config_t *)
1683 	    vxge_mem_alloc(sizeof(vxge_hal_device_config_t));
1685 	if (!vdev->device_config) {
1687 		error_level = VXGE_DISABLE_PCI_BUSMASTER;
1688 		device_printf(vdev->ndev,
1689 		    "failed to allocate memory for device config\n");
1694 	pci_info = (vxge_pci_info_t *) vxge_mem_alloc(sizeof(vxge_pci_info_t));
1696 		error_level = VXGE_FREE_DEVICE_CONFIG;
1698 		device_printf(vdev->ndev,
1699 		    "failed to allocate memory for pci info\n");
1702 	pci_info->ndev = ndev;
1703 	vdev->pdev = pci_info;
1705 	err = vxge_alloc_bar_resources(vdev, 0);
1707 		error_level = VXGE_FREE_BAR0;
1711 	err = vxge_alloc_bar_resources(vdev, 1);
1713 		error_level = VXGE_FREE_BAR1;
1717 	err = vxge_alloc_bar_resources(vdev, 2);
1719 		error_level = VXGE_FREE_BAR2;
	/* Unwind everything allocated up to the recorded error level. */
1723 	vxge_free_resources(ndev, error_level);
1729 * vxge_alloc_bar_resources
1730 * Allocates BAR resources
/*
 * vxge_alloc_bar_resources
 * Allocate and activate memory BAR i (BARs are 64-bit, hence rid 0/2/4),
 * then record its bus tag, handle, start address and length in a
 * freshly allocated vxge_bus_res_t attached to pci_info->reg_map[i].
 */
1733 vxge_alloc_bar_resources(vxge_dev_t *vdev, int i)
1737 	vxge_pci_info_t *pci_info = vdev->pdev;
	/* 64-bit BARs occupy two registers each: rid = 0, 2, 4. */
1739 	res_id = PCIR_BAR((i == 0) ? 0 : (i * 2));
1741 	pci_info->bar_info[i] =
1742 	    bus_alloc_resource_any(vdev->ndev,
1743 	    SYS_RES_MEMORY, &res_id, RF_ACTIVE);
1745 	if (pci_info->bar_info[i] == NULL) {
1746 		device_printf(vdev->ndev,
1747 		    "failed to allocate memory for bus resources\n");
1752 	pci_info->reg_map[i] =
1753 	    (vxge_bus_res_t *) vxge_mem_alloc(sizeof(vxge_bus_res_t));
1755 	if (pci_info->reg_map[i] == NULL) {
1756 		device_printf(vdev->ndev,
1757 		    "failed to allocate memory bar resources\n");
1762 	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_tag =
1763 	    rman_get_bustag(pci_info->bar_info[i]);
1765 	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_handle =
1766 	    rman_get_bushandle(pci_info->bar_info[i]);
1768 	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bar_start_addr =
1769 	    pci_info->bar_info[i];
1771 	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_res_len =
1772 	    rman_get_size(pci_info->bar_info[i]);
1779 * vxge_alloc_isr_resources
/*
 * vxge_alloc_isr_resources
 * Allocate interrupt resources.  Tries MSI-X first (4 vectors per vpath);
 * if the device cannot supply enough vectors it falls back to a single
 * shared INTA line.  MSI-X IRQ resources are allocated per vpath (traffic
 * vector, rid + 2) plus one alarm vector (rid 3), each bound round-robin
 * to a CPU.
 */
1782 vxge_alloc_isr_resources(vxge_dev_t *vdev)
1784 	int i, err = 0, irq_rid;
1785 	int msix_vec_reqd, intr_count, msix_count;
1787 	int intr_mode = VXGE_HAL_INTR_MODE_IRQLINE;
1789 	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1790 		/* MSI-X messages supported by device */
1791 		intr_count = pci_msix_count(vdev->ndev);
1794 		msix_vec_reqd = 4 * vdev->no_of_vpath;
1795 		if (intr_count >= msix_vec_reqd) {
1796 			intr_count = msix_vec_reqd;
1798 			err = pci_alloc_msix(vdev->ndev, &intr_count);
1800 				intr_mode = VXGE_HAL_INTR_MODE_MSIX;
		/* Not enough vectors: fall back to legacy line interrupts. */
1803 		if ((err != 0) || (intr_count < msix_vec_reqd)) {
1804 			device_printf(vdev->ndev, "Unable to allocate "
1805 			    "msi/x vectors switching to INTA mode\n");
1811 	vdev->intr_count = 0;
1812 	vdev->config.intr_mode = intr_mode;
1814 	switch (vdev->config.intr_mode) {
1815 	case VXGE_HAL_INTR_MODE_IRQLINE:
1816 		vdev->config.isr_info[0].irq_rid = 0;
1817 		vdev->config.isr_info[0].irq_res =
1818 		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1819 		    &vdev->config.isr_info[0].irq_rid,
1820 		    (RF_SHAREABLE | RF_ACTIVE));
1822 		if (vdev->config.isr_info[0].irq_res == NULL) {
1823 			device_printf(vdev->ndev,
1824 			    "failed to allocate line interrupt resource\n");
1831 	case VXGE_HAL_INTR_MODE_MSIX:
		/* One traffic vector per vpath (rid + 2 within the group). */
1833 		for (i = 0; i < vdev->no_of_vpath; i++) {
1836 			vdev->config.isr_info[msix_count].irq_rid = irq_rid + 2;
1837 			vdev->config.isr_info[msix_count].irq_res =
1838 			    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1839 			    &vdev->config.isr_info[msix_count].irq_rid,
1840 			    (RF_SHAREABLE | RF_ACTIVE));
1842 			if (vdev->config.isr_info[msix_count].irq_res == NULL) {
1843 				device_printf(vdev->ndev,
1844 				    "allocating bus resource (rid %d) failed\n",
1845 				    vdev->config.isr_info[msix_count].irq_rid);
			/* Spread traffic vectors across CPUs round-robin. */
1851 			err = bus_bind_intr(vdev->ndev,
1852 			    vdev->config.isr_info[msix_count].irq_res,
		/* Single shared alarm vector at rid 3. */
1860 		vdev->config.isr_info[msix_count].irq_rid = 3;
1861 		vdev->config.isr_info[msix_count].irq_res =
1862 		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1863 		    &vdev->config.isr_info[msix_count].irq_rid,
1864 		    (RF_SHAREABLE | RF_ACTIVE));
1866 		if (vdev->config.isr_info[msix_count].irq_res == NULL) {
1867 			device_printf(vdev->ndev,
1868 			    "allocating bus resource (rid %d) failed\n",
1869 			    vdev->config.isr_info[msix_count].irq_rid);
1875 		err = bus_bind_intr(vdev->ndev,
1876 		    vdev->config.isr_info[msix_count].irq_res, (i % mp_ncpus));
	/* Keep HAL's notion of interrupt mode in sync with the driver's. */
1881 	vdev->device_config->intr_mode = vdev->config.intr_mode;
1888 * vxge_free_resources
1889 * Undo what-all we did during load/attach
/*
 * vxge_free_resources
 * Unwind attach-time allocations.  The switch cases appear ordered from
 * the most-recently acquired resource down to the first, with each case
 * falling through to the next so that entering at a given error level
 * releases everything acquired up to that point — NOTE(review): the
 * break/fallthrough lines are elided here; confirm against full source.
 */
1892 vxge_free_resources(device_t ndev, vxge_free_resources_e vxge_free_resource)
1897 	vdev = (vxge_dev_t *) device_get_softc(ndev);
1899 	switch (vxge_free_resource) {
1901 		for (i = 0; i < vdev->intr_count; i++) {
1902 			bus_teardown_intr(ndev,
1903 			    vdev->config.isr_info[i].irq_res,
1904 			    vdev->config.isr_info[i].irq_handle);
1908 	case VXGE_FREE_INTERFACE:
1909 		ether_ifdetach(vdev->ifp);
1910 		bus_generic_detach(ndev);
1914 	case VXGE_FREE_MEDIA:
1915 		ifmedia_removeall(&vdev->media);
1918 	case VXGE_FREE_MUTEX:
1919 		vxge_mutex_destroy(vdev);
1922 	case VXGE_FREE_VPATH:
1923 		vxge_mem_free(vdev->vpaths,
1924 		    vdev->no_of_vpath * sizeof(vxge_vpath_t));
1927 	case VXGE_FREE_TERMINATE_DEVICE:
1928 		if (vdev->devh != NULL) {
1929 			vxge_hal_device_private_set(vdev->devh, 0);
1930 			vxge_hal_device_terminate(vdev->devh);
1934 	case VXGE_FREE_ISR_RESOURCE:
1935 		vxge_free_isr_resources(vdev);
1938 	case VXGE_FREE_BAR2:
1939 		vxge_free_bar_resources(vdev, 2);
1942 	case VXGE_FREE_BAR1:
1943 		vxge_free_bar_resources(vdev, 1);
1946 	case VXGE_FREE_BAR0:
1947 		vxge_free_bar_resources(vdev, 0);
1950 	case VXGE_FREE_PCI_INFO:
1951 		vxge_mem_free(vdev->pdev, sizeof(vxge_pci_info_t));
1954 	case VXGE_FREE_DEVICE_CONFIG:
1955 		vxge_mem_free(vdev->device_config,
1956 		    sizeof(vxge_hal_device_config_t));
1959 	case VXGE_DISABLE_PCI_BUSMASTER:
1960 		pci_disable_busmaster(ndev);
1963 	case VXGE_FREE_TERMINATE_DRIVER:
		/* Terminate the HAL only when the last device goes away. */
1964 		if (vxge_dev_ref_count) {
1965 			--vxge_dev_ref_count;
1966 			if (0 == vxge_dev_ref_count)
1967 				vxge_hal_driver_terminate();
1972 	case VXGE_FREE_NONE:
/*
 * vxge_free_isr_resources
 * Release IRQ bus resources: the single line-interrupt resource in INTA
 * mode, or every allocated vector (plus the MSI table itself) in MSI-X
 * mode.  Released pointers are NULLed to make double-free harmless.
 */
1979 vxge_free_isr_resources(vxge_dev_t *vdev)
1983 	switch (vdev->config.intr_mode) {
1984 	case VXGE_HAL_INTR_MODE_IRQLINE:
1985 		if (vdev->config.isr_info[0].irq_res) {
1986 			bus_release_resource(vdev->ndev, SYS_RES_IRQ,
1987 			    vdev->config.isr_info[0].irq_rid,
1988 			    vdev->config.isr_info[0].irq_res);
1990 			vdev->config.isr_info[0].irq_res = NULL;
1994 	case VXGE_HAL_INTR_MODE_MSIX:
1995 		for (i = 0; i < vdev->intr_count; i++) {
1996 			if (vdev->config.isr_info[i].irq_res) {
1997 				bus_release_resource(vdev->ndev, SYS_RES_IRQ,
1998 				    vdev->config.isr_info[i].irq_rid,
1999 				    vdev->config.isr_info[i].irq_res);
2001 				vdev->config.isr_info[i].irq_res = NULL;
2005 		if (vdev->intr_count)
2006 			pci_release_msi(vdev->ndev);
/*
 * vxge_free_bar_resources
 * Release memory BAR i (rid mirrors the 0/2/4 scheme used at allocation)
 * and free its associated vxge_bus_res_t mapping record.
 */
2013 vxge_free_bar_resources(vxge_dev_t *vdev, int i)
2016 	vxge_pci_info_t *pci_info = vdev->pdev;
2018 	res_id = PCIR_BAR((i == 0) ? 0 : (i * 2));
2020 	if (pci_info->bar_info[i])
2021 		bus_release_resource(vdev->ndev, SYS_RES_MEMORY,
2022 		    res_id, pci_info->bar_info[i]);
2024 	vxge_mem_free(pci_info->reg_map[i], sizeof(vxge_bus_res_t));
2029 * Initializes mutexes used in driver
/*
 * vxge_mutex_init
 * Create the driver-wide mutex plus one transmit mutex per vpath, each
 * named after the device so they are distinguishable in lock profiling.
 */
2032 vxge_mutex_init(vxge_dev_t *vdev)
2036 	snprintf(vdev->mtx_drv_name, sizeof(vdev->mtx_drv_name),
2037 	    "%s_drv", vdev->ndev_name);
2039 	mtx_init(&vdev->mtx_drv, vdev->mtx_drv_name,
2040 	    MTX_NETWORK_LOCK, MTX_DEF);
2042 	for (i = 0; i < vdev->no_of_vpath; i++) {
2043 		snprintf(vdev->vpaths[i].mtx_tx_name,
2044 		    sizeof(vdev->vpaths[i].mtx_tx_name), "%s_tx_%d",
2045 		    vdev->ndev_name, i);
2047 		mtx_init(&vdev->vpaths[i].mtx_tx,
2048 		    vdev->vpaths[i].mtx_tx_name, NULL, MTX_DEF);
2053 * vxge_mutex_destroy
2054 * Destroys mutexes used in driver
/*
 * vxge_mutex_destroy
 * Destroy every per-vpath transmit mutex, then the driver-wide mutex.
 */
2057 vxge_mutex_destroy(vxge_dev_t *vdev)
2061 	for (i = 0; i < vdev->no_of_vpath; i++)
2062 		VXGE_TX_LOCK_DESTROY(&(vdev->vpaths[i]));
2064 	VXGE_DRV_LOCK_DESTROY(vdev);
/*
 * vxge_rth_config
 * Configure Receive Traffic Hashing: build a bucket-to-vpath indirection
 * table (buckets distributed round-robin over the vpaths), enable hashing
 * on TCP/UDP over IPv4/IPv6 (incl. extension headers), program the
 * indirection table, then enable RTH on every open vpath.
 */
2071 vxge_rth_config(vxge_dev_t *vdev)
2074 	vxge_hal_vpath_h vpath_handle;
2075 	vxge_hal_rth_hash_types_t hash_types;
2076 	vxge_hal_status_e status = VXGE_HAL_OK;
2077 	u8 mtable[256] = {0};
2079 	/* Filling mtable with bucket-to-vpath mapping */
2080 	vdev->config.rth_bkt_sz = VXGE_DEFAULT_RTH_BUCKET_SIZE;
2082 	for (i = 0; i < (1 << vdev->config.rth_bkt_sz); i++)
2083 		mtable[i] = i % vdev->no_of_vpath;
2085 	/* Fill RTH hash types */
2086 	hash_types.hash_type_tcpipv4_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV4;
2087 	hash_types.hash_type_tcpipv6_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6;
2088 	hash_types.hash_type_tcpipv6ex_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6_EX;
2089 	hash_types.hash_type_ipv4_en = VXGE_HAL_RING_HASH_TYPE_IPV4;
2090 	hash_types.hash_type_ipv6_en = VXGE_HAL_RING_HASH_TYPE_IPV6;
2091 	hash_types.hash_type_ipv6ex_en = VXGE_HAL_RING_HASH_TYPE_IPV6_EX;
2093 	/* set indirection table, bucket-to-vpath mapping */
2094 	status = vxge_hal_vpath_rts_rth_itable_set(vdev->vpath_handles,
2095 	    vdev->no_of_vpath, mtable,
2096 	    ((u32) (1 << vdev->config.rth_bkt_sz)));
2098 	if (status != VXGE_HAL_OK) {
2099 		device_printf(vdev->ndev, "rth configuration failed\n");
2102 	for (i = 0; i < vdev->no_of_vpath; i++) {
2103 		vpath_handle = vxge_vpath_handle_get(vdev, i);
2107 		status = vxge_hal_vpath_rts_rth_set(vpath_handle,
2109 		    &hash_types, vdev->config.rth_bkt_sz, TRUE);
2110 		if (status != VXGE_HAL_OK) {
2111 			device_printf(vdev->ndev,
2112 			    "rth configuration failed for vpath (%d)\n",
2113 			    vdev->vpaths[i].vp_id);
2124 * Sets HAL parameter values from kenv
/*
 * vxge_vpath_config
 * Populate the HAL per-vpath configuration: disable all vpaths first,
 * then for each vpath present in the hardware mask (up to the requested
 * count) enable ring and FIFO, set buffer/scatter modes, VLAN and
 * promiscuity policy, and program RTI/TTI interrupt-moderation timers.
 * Finally derive tx steering and RTH settings from the resulting vpath
 * count.
 */
2127 vxge_vpath_config(vxge_dev_t *vdev)
2130 	u32 no_of_vpath = 0;
2131 	vxge_hal_vp_config_t *vp_config;
2132 	vxge_hal_device_config_t *device_config = vdev->device_config;
2134 	device_config->debug_level = VXGE_TRACE;
2135 	device_config->debug_mask = VXGE_COMPONENT_ALL;
2136 	device_config->device_poll_millis = VXGE_DEFAULT_DEVICE_POLL_MILLIS;
	/* Clamp the requested vpath count to what the function supports. */
2138 	vdev->config.no_of_vpath =
2139 	    min(vdev->config.no_of_vpath, vdev->max_supported_vpath);
	/* Start with every vpath disabled ... */
2141 	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
2142 		vp_config = &(device_config->vp_config[i]);
2143 		vp_config->fifo.enable = VXGE_HAL_FIFO_DISABLE;
2144 		vp_config->ring.enable = VXGE_HAL_RING_DISABLE;
	/* ... then enable only those present in the hardware vpath mask. */
2147 	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
2148 		if (no_of_vpath >= vdev->config.no_of_vpath)
2151 		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
2155 		vp_config = &(device_config->vp_config[i]);
2156 		vp_config->mtu = VXGE_HAL_DEFAULT_MTU;
2157 		vp_config->ring.enable = VXGE_HAL_RING_ENABLE;
2158 		vp_config->ring.post_mode = VXGE_HAL_RING_POST_MODE_DOORBELL;
2159 		vp_config->ring.buffer_mode = VXGE_HAL_RING_RXD_BUFFER_MODE_1;
2160 		vp_config->ring.ring_length =
2161 		    vxge_ring_length_get(VXGE_HAL_RING_RXD_BUFFER_MODE_1);
2162 		vp_config->ring.scatter_mode = VXGE_HAL_RING_SCATTER_MODE_A;
2163 		vp_config->rpa_all_vid_en = VXGE_DEFAULT_ALL_VID_ENABLE;
2164 		vp_config->rpa_strip_vlan_tag = VXGE_DEFAULT_STRIP_VLAN_TAG;
2165 		vp_config->rpa_ucast_all_addr_en =
2166 		    VXGE_HAL_VPATH_RPA_UCAST_ALL_ADDR_DISABLE;
		/* Receive-side interrupt moderation (RTI). */
2168 		vp_config->rti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
2169 		vp_config->rti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
2170 		vp_config->rti.util_sel =
2171 		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
2173 		vp_config->rti.uec_a = VXGE_DEFAULT_RTI_RX_UFC_A;
2174 		vp_config->rti.uec_b = VXGE_DEFAULT_RTI_RX_UFC_B;
2175 		vp_config->rti.uec_c = VXGE_DEFAULT_RTI_RX_UFC_C;
2176 		vp_config->rti.uec_d = VXGE_DEFAULT_RTI_RX_UFC_D;
2178 		vp_config->rti.urange_a = VXGE_DEFAULT_RTI_RX_URANGE_A;
2179 		vp_config->rti.urange_b = VXGE_DEFAULT_RTI_RX_URANGE_B;
2180 		vp_config->rti.urange_c = VXGE_DEFAULT_RTI_RX_URANGE_C;
2182 		vp_config->rti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
2183 		vp_config->rti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;
		/* Timer values are in 272-ns device units; inputs are in us. */
2185 		vp_config->rti.btimer_val =
2186 		    (VXGE_DEFAULT_RTI_BTIMER_VAL * 1000) / 272;
2187 		vp_config->rti.rtimer_val =
2188 		    (VXGE_DEFAULT_RTI_RTIMER_VAL * 1000) / 272;
2189 		vp_config->rti.ltimer_val =
2190 		    (VXGE_DEFAULT_RTI_LTIMER_VAL * 1000) / 272;
		/* Without multiqueue, only the first vpath gets a FIFO. */
2192 		if ((no_of_vpath > 1) && (VXGE_DEFAULT_CONFIG_MQ_ENABLE == 0))
2195 		vp_config->fifo.enable = VXGE_HAL_FIFO_ENABLE;
2196 		vp_config->fifo.max_aligned_frags =
2197 		    VXGE_DEFAULT_FIFO_ALIGNED_FRAGS;
		/* Transmit-side interrupt moderation (TTI). */
2199 		vp_config->tti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
2200 		vp_config->tti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
2201 		vp_config->tti.util_sel =
2202 		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
2204 		vp_config->tti.uec_a = VXGE_DEFAULT_TTI_TX_UFC_A;
2205 		vp_config->tti.uec_b = VXGE_DEFAULT_TTI_TX_UFC_B;
2206 		vp_config->tti.uec_c = VXGE_DEFAULT_TTI_TX_UFC_C;
2207 		vp_config->tti.uec_d = VXGE_DEFAULT_TTI_TX_UFC_D;
2209 		vp_config->tti.urange_a = VXGE_DEFAULT_TTI_TX_URANGE_A;
2210 		vp_config->tti.urange_b = VXGE_DEFAULT_TTI_TX_URANGE_B;
2211 		vp_config->tti.urange_c = VXGE_DEFAULT_TTI_TX_URANGE_C;
2213 		vp_config->tti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
2214 		vp_config->tti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;
2216 		vp_config->tti.btimer_val =
2217 		    (VXGE_DEFAULT_TTI_BTIMER_VAL * 1000) / 272;
2218 		vp_config->tti.rtimer_val =
2219 		    (VXGE_DEFAULT_TTI_RTIMER_VAL * 1000) / 272;
2220 		vp_config->tti.ltimer_val =
2221 		    (VXGE_DEFAULT_TTI_LTIMER_VAL * 1000) / 272;
2224 	vdev->no_of_vpath = no_of_vpath;
	/* Tx steering and RTH only make sense with more than one vpath. */
2226 	if (vdev->no_of_vpath == 1)
2227 		vdev->config.tx_steering = 0;
2229 	if (vdev->config.rth_enable && (vdev->no_of_vpath > 1)) {
2230 		device_config->rth_en = VXGE_HAL_RTH_ENABLE;
2231 		device_config->rth_it_type = VXGE_HAL_RTH_IT_TYPE_MULTI_IT;
2234 	vdev->config.rth_enable = device_config->rth_en;
2239 * Virtual path Callback function
/*
 * vxge_vpath_cb_fn
 * No-op HAL message callback registered at vpath open; always reports
 * success.
 */
2242 static vxge_hal_status_e
2243 vxge_vpath_cb_fn(vxge_hal_client_h client_handle, vxge_hal_up_msg_h msgh,
2244     vxge_hal_message_type_e msg_type, vxge_hal_obj_id_t obj_id,
2245     vxge_hal_result_e result, vxge_hal_opaque_handle_t *opaque_handle)
2247 	return (VXGE_HAL_OK);
/*
 * vxge_vpath_open
 * Open every configured vpath: fill in FIFO and ring attributes
 * (completion/init/term callbacks, per-descriptor scratch space), create
 * DMA tags, allocate the multiqueue buf_ring (FreeBSD 8+), open the vpath
 * via the HAL, then set per-vpath coalescing defaults and optional LRO.
 */
2254 vxge_vpath_open(vxge_dev_t *vdev)
2256 	int i, err = EINVAL;
2259 	vxge_vpath_t *vpath;
2260 	vxge_hal_vpath_attr_t vpath_attr;
2261 	vxge_hal_status_e status = VXGE_HAL_OK;
2262 	struct lro_ctrl *lro = NULL;
2264 	bzero(&vpath_attr, sizeof(vxge_hal_vpath_attr_t));
2266 	for (i = 0; i < vdev->no_of_vpath; i++) {
2268 		vpath = &(vdev->vpaths[i]);
2271 		/* Vpath vpath_attr: FIFO */
2272 		vpath_attr.vp_id = vpath->vp_id;
2273 		vpath_attr.fifo_attr.callback = vxge_tx_compl;
2274 		vpath_attr.fifo_attr.txdl_init = vxge_tx_replenish;
2275 		vpath_attr.fifo_attr.txdl_term = vxge_tx_term;
2276 		vpath_attr.fifo_attr.userdata = vpath;
2277 		vpath_attr.fifo_attr.per_txdl_space = sizeof(vxge_txdl_priv_t);
2279 		/* Vpath vpath_attr: Ring */
2280 		vpath_attr.ring_attr.callback = vxge_rx_compl;
2281 		vpath_attr.ring_attr.rxd_init = vxge_rx_replenish;
2282 		vpath_attr.ring_attr.rxd_term = vxge_rx_term;
2283 		vpath_attr.ring_attr.userdata = vpath;
2284 		vpath_attr.ring_attr.per_rxd_space = sizeof(vxge_rxd_priv_t);
2286 		err = vxge_dma_tags_create(vpath);
2288 			device_printf(vdev->ndev,
2289 			    "failed to create dma tags\n");
2292 #if __FreeBSD_version >= 800000
2293 		vpath->br = buf_ring_alloc(VXGE_DEFAULT_BR_SIZE, M_DEVBUF,
2294 		    M_WAITOK, &vpath->mtx_tx);
2295 		if (vpath->br == NULL) {
2300 		status = vxge_hal_vpath_open(vdev->devh, &vpath_attr,
2301 		    (vxge_hal_vpath_callback_f) vxge_vpath_cb_fn,
2302 		    NULL, &vpath->handle);
2303 		if (status != VXGE_HAL_OK) {
2304 			device_printf(vdev->ndev,
2305 			    "failed to open vpath (%d)\n", vpath->vp_id);
2309 		vpath->is_open = TRUE;
2310 		vdev->vpath_handles[i] = vpath->handle;
2312 		vpath->tx_ticks = ticks;
2313 		vpath->rx_ticks = ticks;
2315 		vpath->tti_rtimer_val = VXGE_DEFAULT_TTI_RTIMER_VAL;
2316 		vpath->rti_rtimer_val = VXGE_DEFAULT_RTI_RTIMER_VAL;
2318 		vpath->tx_intr_coalesce = vdev->config.intr_coalesce;
2319 		vpath->rx_intr_coalesce = vdev->config.intr_coalesce;
2321 		func_id = vdev->config.hw_info.func_id;
		/* Low-latency high-priority functions skip tx coalescing. */
2323 		if (vdev->config.low_latency &&
2324 		    (vdev->config.bw_info[func_id].priority ==
2325 		    VXGE_DEFAULT_VPATH_PRIORITY_HIGH)) {
2326 			vpath->tx_intr_coalesce = 0;
2329 		if (vdev->ifp->if_capenable & IFCAP_LRO) {
			/* NOTE(review): lro is presumably pointed at
			 * vpath's lro_ctrl on an elided line above —
			 * confirm against full source. */
2330 			err = tcp_lro_init(lro);
2332 				device_printf(vdev->ndev,
2333 				    "LRO Initialization failed!\n");
2336 			vpath->lro_enable = TRUE;
2337 			lro->ifp = vdev->ifp;
/*
 * vxge_tso_config
 * Enable TSO on the interface, then revoke it if this function's
 * bandwidth priority is not high; VLAN hardware TSO follows TSO4
 * on FreeBSD 8+.
 */
2345 vxge_tso_config(vxge_dev_t *vdev)
2347 	u32 func_id, priority;
2348 	vxge_hal_status_e status = VXGE_HAL_OK;
2350 	vdev->ifp->if_capabilities |= IFCAP_TSO4;
2352 	status = vxge_bw_priority_get(vdev, NULL);
2353 	if (status == VXGE_HAL_OK) {
2355 		func_id = vdev->config.hw_info.func_id;
2356 		priority = vdev->config.bw_info[func_id].priority;
		/* Only high-priority functions keep TSO enabled. */
2358 		if (priority != VXGE_DEFAULT_VPATH_PRIORITY_HIGH)
2359 			vdev->ifp->if_capabilities &= ~IFCAP_TSO4;
2362 #if __FreeBSD_version >= 800000
2363 	if (vdev->ifp->if_capabilities & IFCAP_TSO4)
2364 		vdev->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
/*
 * vxge_bw_priority_get
 * Query bandwidth and priority for a PCI function.  With bw_info == NULL
 * the device's own function is queried and the result cached in
 * vdev->config.bw_info; otherwise the caller-specified function is
 * queried and results are returned in *bw_info.  Firmware 1.8.0+ exposes
 * a direct per-function API; older firmware goes through the vpath list.
 */
2370 vxge_bw_priority_get(vxge_dev_t *vdev, vxge_bw_info_t *bw_info)
2372 	u32 priority, bandwidth;
2375 	u64 func_id, func_mode, vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];
2376 	vxge_hal_status_e status = VXGE_HAL_OK;
2378 	func_id = vdev->config.hw_info.func_id;
	/* NOTE(review): a bw_info NULL-check appears elided between these
	 * two assignments — confirm against full source. */
2380 		func_id = bw_info->func_id;
2381 		func_mode = vdev->config.hw_info.function_mode;
		/* Single-function adapters only have function 0. */
2382 		if ((is_single_func(func_mode)) && (func_id > 0))
2383 			return (VXGE_HAL_FAIL);
2386 	if (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)) {
2388 		status = vxge_hal_vf_rx_bw_get(vdev->devh,
2389 		    func_id, &bandwidth, &priority);
2393 		status = vxge_hal_get_vpath_list(vdev->devh,
2394 		    func_id, vpath_list, &vpath_count);
2396 		if (status == VXGE_HAL_OK) {
2397 			status = vxge_hal_bw_priority_get(vdev->devh,
2398 			    vpath_list[0], &bandwidth, &priority);
2402 	if (status == VXGE_HAL_OK) {
2404 			bw_info->priority = priority;
2405 			bw_info->bandwidth = bandwidth;
2407 			vdev->config.bw_info[func_id].priority = priority;
2408 			vdev->config.bw_info[func_id].bandwidth = bandwidth;
/*
 * vxge_vpath_close
 * Close every vpath and release its per-vpath resources: the HAL handle,
 * the multiqueue buf_ring (FreeBSD 8+), LRO state, and the rx/tx DMA
 * tags (plus the extra rx map).
 */
2419 vxge_vpath_close(vxge_dev_t *vdev)
2422 	vxge_vpath_t *vpath;
2424 	for (i = 0; i < vdev->no_of_vpath; i++) {
2426 		vpath = &(vdev->vpaths[i]);
2428 		vxge_hal_vpath_close(vpath->handle);
2430 #if __FreeBSD_version >= 800000
2431 		if (vpath->br != NULL)
2432 			buf_ring_free(vpath->br, M_DEVBUF);
2434 		/* Free LRO memory */
2435 		if (vpath->lro_enable)
2436 			tcp_lro_free(&vpath->lro);
2438 		if (vpath->dma_tag_rx) {
2439 			bus_dmamap_destroy(vpath->dma_tag_rx,
2440 			    vpath->extra_dma_map);
2441 			bus_dma_tag_destroy(vpath->dma_tag_rx);
2444 		if (vpath->dma_tag_tx)
2445 			bus_dma_tag_destroy(vpath->dma_tag_tx);
2447 		vpath->handle = NULL;
2448 		vpath->is_open = FALSE;
/*
 * vxge_vpath_reset
 * Issue a HAL reset on every open vpath; failures are logged but do not
 * stop the loop.
 */
2456 vxge_vpath_reset(vxge_dev_t *vdev)
2459 	vxge_hal_vpath_h vpath_handle;
2460 	vxge_hal_status_e status = VXGE_HAL_OK;
2462 	for (i = 0; i < vdev->no_of_vpath; i++) {
2463 		vpath_handle = vxge_vpath_handle_get(vdev, i);
2467 		status = vxge_hal_vpath_reset(vpath_handle);
2468 		if (status != VXGE_HAL_OK)
2469 			device_printf(vdev->ndev,
2470 			    "failed to reset vpath :%d\n", i);
/*
 * vxge_vpath_get
 * Transmit steering: pick a vpath index for an outgoing mbuf by hashing
 * the TCP/UDP source and destination ports (IPv4 and IPv6, VLAN-aware)
 * through the vpath_selector mask, clamped to the number of vpaths.
 */
2475 vxge_vpath_get(vxge_dev_t *vdev, mbuf_t mhead)
2477 	struct tcphdr *th = NULL;
2478 	struct udphdr *uh = NULL;
2479 	struct ip *ip = NULL;
2480 	struct ip6_hdr *ip6 = NULL;
2481 	struct ether_vlan_header *eth = NULL;
2484 	int ehdrlen, iphlen = 0;
2486 	u16 etype, src_port, dst_port;
2487 	u16 queue_len, counter = 0;
2489 	src_port = dst_port = 0;
2490 	queue_len = vdev->no_of_vpath;
	/* Skip the VLAN tag, if present, to find the real ethertype. */
2492 	eth = mtod(mhead, struct ether_vlan_header *);
2493 	if (eth->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2494 		etype = ntohs(eth->evl_proto);
2495 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2497 		etype = ntohs(eth->evl_encap_proto);
2498 		ehdrlen = ETHER_HDR_LEN;
2503 		ip = (struct ip *) (mhead->m_data + ehdrlen);
2504 		iphlen = ip->ip_hl << 2;
2506 		th = (struct tcphdr *) ((caddr_t)ip + iphlen);
2507 		uh = (struct udphdr *) ((caddr_t)ip + iphlen);
2510 	case ETHERTYPE_IPV6:
2511 		ip6 = (struct ip6_hdr *) (mhead->m_data + ehdrlen);
2512 		iphlen = sizeof(struct ip6_hdr);
2513 		ipproto = ip6->ip6_nxt;
2515 		ulp = mtod(mhead, char *) + iphlen;
2516 		th = ((struct tcphdr *) (ulp));
2517 		uh = ((struct udphdr *) (ulp));
2526 		src_port = th->th_sport;
2527 		dst_port = th->th_dport;
2531 		src_port = uh->uh_sport;
2532 		dst_port = uh->uh_dport;
	/* Hash ports through the per-queue-count selector mask. */
2539 	counter = (ntohs(src_port) + ntohs(dst_port)) &
2540 	    vpath_selector[queue_len - 1];
2542 	if (counter >= queue_len)
2543 		counter = queue_len - 1;
/*
 * vxge_vpath_handle_get
 * Return the HAL handle for vpath i, or NULL if that vpath is not open.
 */
2548 static inline vxge_hal_vpath_h
2549 vxge_vpath_handle_get(vxge_dev_t *vdev, int i)
2551 	return (vdev->vpaths[i].is_open ? vdev->vpaths[i].handle : NULL);
/*
 * vxge_firmware_verify
 * Reconcile running firmware/device settings with the driver
 * configuration: optionally upgrade firmware, apply a requested function
 * mode, reconcile the L2-switch setting, and update port mode on
 * dual-port adapters.  Changes that require a power cycle are announced
 * to the operator.
 */
2555 vxge_firmware_verify(vxge_dev_t *vdev)
2559 	vxge_hal_status_e status = VXGE_HAL_FAIL;
2561 	if (vdev->fw_upgrade) {
2562 		status = vxge_firmware_upgrade(vdev);
2563 		if (status == VXGE_HAL_OK) {
	/* Apply a user-requested function mode if it differs from hw. */
2569 	if ((vdev->config.function_mode != VXGE_DEFAULT_CONFIG_VALUE) &&
2570 	    (vdev->config.hw_info.function_mode !=
2571 	    (u64) vdev->config.function_mode)) {
2573 		status = vxge_func_mode_set(vdev);
2574 		if (status == VXGE_HAL_OK)
2578 	/* l2_switch configuration */
2579 	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2580 	status = vxge_hal_get_active_config(vdev->devh,
2581 	    VXGE_HAL_XMAC_NWIF_ActConfig_L2SwitchEnabled,
2584 	if (status == VXGE_HAL_OK) {
2585 		vdev->l2_switch = active_config;
2586 		if (vdev->config.l2_switch != VXGE_DEFAULT_CONFIG_VALUE) {
2587 			if (vdev->config.l2_switch != active_config) {
2588 				status = vxge_l2switch_mode_set(vdev);
2589 				if (status == VXGE_HAL_OK)
2595 	if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
2596 		if (vxge_port_mode_update(vdev) == ENXIO)
2602 	device_printf(vdev->ndev, "PLEASE POWER CYCLE THE SYSTEM\n");
/*
 * vxge_firmware_upgrade
 * Flash the firmware image that is compiled into the driver
 * (VXGE_FW_ARRAY_NAME) via the HAL mrpcim upgrade API, logging the
 * current and target versions and the outcome.
 */
2608 vxge_firmware_upgrade(vxge_dev_t *vdev)
2612 	vxge_hal_device_hw_info_t *hw_info;
2613 	vxge_hal_status_e status = VXGE_HAL_OK;
2615 	hw_info = &vdev->config.hw_info;
2617 	fw_size = sizeof(VXGE_FW_ARRAY_NAME);
2618 	fw_buffer = (u8 *) VXGE_FW_ARRAY_NAME;
2620 	device_printf(vdev->ndev, "Current firmware version : %s (%s)\n",
2621 	    hw_info->fw_version.version, hw_info->fw_date.date);
2623 	device_printf(vdev->ndev, "Upgrading firmware to %d.%d.%d\n",
2624 	    VXGE_MIN_FW_MAJOR_VERSION, VXGE_MIN_FW_MINOR_VERSION,
2625 	    VXGE_MIN_FW_BUILD_NUMBER);
2627 	/* Call HAL API to upgrade firmware */
2628 	status = vxge_hal_mrpcim_fw_upgrade(vdev->pdev,
2629 	    (pci_reg_h) vdev->pdev->reg_map[0],
2630 	    (u8 *) vdev->pdev->bar_info[0],
2631 	    fw_buffer, fw_size);
2633 	device_printf(vdev->ndev, "firmware upgrade %s\n",
2634 	    (status == VXGE_HAL_OK) ? "successful" : "failed");
/*
 * vxge_func_mode_set
 * Program the requested PCIe function mode and commit it to firmware.
 * When switching a dual-port multi-function adapter to single-function,
 * also drops the port mode to single-port.
 */
2640 vxge_func_mode_set(vxge_dev_t *vdev)
2643 	vxge_hal_status_e status = VXGE_HAL_FAIL;
2645 	status = vxge_hal_mrpcim_pcie_func_mode_set(vdev->devh,
2646 	    vdev->config.function_mode);
2647 	device_printf(vdev->ndev,
2648 	    "function mode change %s\n",
2649 	    (status == VXGE_HAL_OK) ? "successful" : "failed");
2651 	if (status == VXGE_HAL_OK) {
		/* Persist the new mode in firmware. */
2652 		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2653 		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2656 		vxge_hal_get_active_config(vdev->devh,
2657 		    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
2661 		 * If in MF + DP mode
2662 		 * if user changes to SF, change port_mode to single port mode
2664 		if (((is_multi_func(vdev->config.hw_info.function_mode)) &&
2665 		    is_single_func(vdev->config.function_mode)) &&
2666 		    (active_config == VXGE_HAL_DP_NP_MODE_DUAL_PORT)) {
2667 			vdev->config.port_mode =
2668 			    VXGE_HAL_DP_NP_MODE_SINGLE_PORT;
2670 			status = vxge_port_mode_set(vdev);
/*
 * vxge_port_mode_set
 * Program the configured network port mode, commit it to firmware, and
 * for active-active (dual-port) mode also program the vpath mapping.
 */
2677 vxge_port_mode_set(vxge_dev_t *vdev)
2679 	vxge_hal_status_e status = VXGE_HAL_FAIL;
2681 	status = vxge_hal_set_port_mode(vdev->devh, vdev->config.port_mode);
2682 	device_printf(vdev->ndev,
2683 	    "port mode change %s\n",
2684 	    (status == VXGE_HAL_OK) ? "successful" : "failed");
2686 	if (status == VXGE_HAL_OK) {
		/* Persist the new mode in firmware. */
2687 		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2688 		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2691 		/* Configure vpath_mapping for active-active mode only */
2692 		if (vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) {
2694 			status = vxge_hal_config_vpath_map(vdev->devh,
2695 			    VXGE_DUAL_PORT_MAP);
2697 			device_printf(vdev->ndev, "dual port map change %s\n",
2698 			    (status == VXGE_HAL_OK) ? "successful" : "failed");
/*
 * vxge_port_mode_update
 * Reconcile the adapter's active port mode and behaviour-on-failure with
 * the driver configuration.  Rejects dual-port mode on single-function
 * adapters.  Defaults behaviour-on-failure per port mode: NoMove for
 * active/active, failover-failback for active/passive.
 */
2705 vxge_port_mode_update(vxge_dev_t *vdev)
2709 	vxge_hal_status_e status = VXGE_HAL_FAIL;
2711 	if ((vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) &&
2712 	    is_single_func(vdev->config.hw_info.function_mode)) {
2714 		device_printf(vdev->ndev,
2715 		    "Adapter in SF mode, dual port mode is not allowed\n");
2720 	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2721 	status = vxge_hal_get_active_config(vdev->devh,
2722 	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
2724 	if (status != VXGE_HAL_OK) {
2729 	vdev->port_mode = active_config;
	/* Apply a user-requested port mode if it differs from the device. */
2730 	if (vdev->config.port_mode != VXGE_DEFAULT_CONFIG_VALUE) {
2731 		if (vdev->config.port_mode != vdev->port_mode) {
2732 			status = vxge_port_mode_set(vdev);
2733 			if (status != VXGE_HAL_OK) {
2738 			vdev->port_mode = vdev->config.port_mode;
2742 	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2743 	status = vxge_hal_get_active_config(vdev->devh,
2744 	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
2746 	if (status != VXGE_HAL_OK) {
2751 	vdev->port_failure = active_config;
2754 	 * active/active mode : set to NoMove
2755 	 * active/passive mode: set to Failover-Failback
2757 	if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT)
2758 		vdev->config.port_failure =
2759 		    VXGE_HAL_XMAC_NWIF_OnFailure_NoMove;
2761 	else if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_ACTIVE_PASSIVE)
2762 		vdev->config.port_failure =
2763 		    VXGE_HAL_XMAC_NWIF_OnFailure_OtherPortBackOnRestore;
2765 	if ((vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) &&
2766 	    (vdev->config.port_failure != vdev->port_failure)) {
2767 		status = vxge_port_behavior_on_failure_set(vdev);
2768 		if (status == VXGE_HAL_OK)
/*
 * vxge_port_mode_get
 * Read the adapter's active port mode and behaviour-on-failure settings
 * into *port_info; each query failure is propagated to the caller.
 */
2777 vxge_port_mode_get(vxge_dev_t *vdev, vxge_port_info_t *port_info)
2781 	vxge_hal_status_e status = VXGE_HAL_FAIL;
2783 	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2784 	status = vxge_hal_get_active_config(vdev->devh,
2785 	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
2788 	if (status != VXGE_HAL_OK) {
2793 	port_info->port_mode = active_config;
2795 	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2796 	status = vxge_hal_get_active_config(vdev->devh,
2797 	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
2799 	if (status != VXGE_HAL_OK) {
2804 	port_info->port_failure = active_config;
/*
 * vxge_port_behavior_on_failure_set
 * Program the configured behavior-on-failure into the adapter and,
 * on success, commit the change through the firmware API.
 */
2811 vxge_port_behavior_on_failure_set(vxge_dev_t *vdev)
2813 vxge_hal_status_e status = VXGE_HAL_FAIL;
2815 status = vxge_hal_set_behavior_on_failure(vdev->devh,
2816 vdev->config.port_failure);
2818 device_printf(vdev->ndev,
2819 "port behaviour on failure change %s\n",
2820 (status == VXGE_HAL_OK) ? "successful" : "failed");
/* Commit to firmware only after the set call succeeded. */
2822 if (status == VXGE_HAL_OK)
2823 vxge_hal_set_fw_api(vdev->devh, 0ULL,
2824 VXGE_HAL_API_FUNC_MODE_COMMIT,
/*
 * vxge_active_port_update
 * Refresh vdev->active_port from the adapter's currently active
 * port configuration; leaves it untouched on query failure.
 */
2831 vxge_active_port_update(vxge_dev_t *vdev)
2834 vxge_hal_status_e status = VXGE_HAL_FAIL;
2836 active_config = VXGE_DEFAULT_CONFIG_VALUE;
2837 status = vxge_hal_get_active_config(vdev->devh,
2838 VXGE_HAL_XMAC_NWIF_ActConfig_ActivePort,
2841 if (status == VXGE_HAL_OK)
2842 vdev->active_port = active_config;
/*
 * vxge_l2switch_mode_set
 * Enable or disable the on-chip L2 switch per vdev->config.l2_switch
 * and commit the new setting through the firmware API on success.
 */
2846 vxge_l2switch_mode_set(vxge_dev_t *vdev)
2848 vxge_hal_status_e status = VXGE_HAL_FAIL;
2850 status = vxge_hal_set_l2switch_mode(vdev->devh,
2851 vdev->config.l2_switch);
/* Nested ternary: reports "enable"/"disable" on success, else failure. */
2853 device_printf(vdev->ndev, "L2 switch %s\n",
2854 (status == VXGE_HAL_OK) ?
2855 (vdev->config.l2_switch) ? "enable" : "disable" :
2858 if (status == VXGE_HAL_OK)
2859 vxge_hal_set_fw_api(vdev->devh, 0ULL,
2860 VXGE_HAL_API_FUNC_MODE_COMMIT,
2868 * Enable Promiscuous Mode
2871 vxge_promisc_set(vxge_dev_t *vdev)
2875 vxge_hal_vpath_h vpath_handle;
/* No-op until the device has been brought up. */
2877 if (!vdev->is_initialized)
/* Mirror IFF_PROMISC onto every open vpath. */
2882 for (i = 0; i < vdev->no_of_vpath; i++) {
2883 vpath_handle = vxge_vpath_handle_get(vdev, i);
2887 if (ifp->if_flags & IFF_PROMISC)
2888 vxge_hal_vpath_promisc_enable(vpath_handle);
2890 vxge_hal_vpath_promisc_disable(vpath_handle);
2896 * Change interface MTU to a requested valid size
2899 vxge_change_mtu(vxge_dev_t *vdev, unsigned long new_mtu)
/* Reject MTUs outside the HAL-supported range. */
2903 if ((new_mtu < VXGE_HAL_MIN_MTU) || (new_mtu > VXGE_HAL_MAX_MTU))
2906 (vdev->ifp)->if_mtu = new_mtu;
2907 device_printf(vdev->ndev, "MTU changed to %u\n", (vdev->ifp)->if_mtu);
/* A running interface needs a restart to pick up the new MTU. */
2909 if (vdev->is_initialized) {
2921 * Creates DMA tags for both Tx and Rx
2924 vxge_dma_tags_create(vxge_vpath_t *vpath)
2927 bus_size_t max_size, boundary;
2928 vxge_dev_t *vdev = vpath->vdev;
2929 ifnet_t ifp = vdev->ifp;
/* Worst-case receive frame: MTU + MAC header + alignment slack. */
2931 max_size = ifp->if_mtu +
2932 VXGE_HAL_MAC_HEADER_MAX_SIZE +
2933 VXGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN;
2935 VXGE_BUFFER_ALIGN(max_size, 128)
/* Pick the smallest mbuf cluster size that fits the frame. */
2936 if (max_size <= MCLBYTES)
2937 vdev->rx_mbuf_sz = MCLBYTES;
2940 (max_size > MJUMPAGESIZE) ? MJUM9BYTES : MJUMPAGESIZE;
/*
 * NOTE(review): boundary == 0 means "no boundary" to bus_dma_tag_create,
 * so large buffers get an unconstrained tag — confirm intended.
 */
2942 boundary = (max_size > PAGE_SIZE) ? 0 : PAGE_SIZE;
2944 /* DMA tag for Tx */
2945 err = bus_dma_tag_create(
2946 bus_get_dma_tag(vdev->ndev),
2959 &(vpath->dma_tag_tx));
2963 /* DMA tag for Rx */
2964 err = bus_dma_tag_create(
2965 bus_get_dma_tag(vdev->ndev),
2978 &(vpath->dma_tag_rx));
/* Spare Rx map used when a replacement mbuf load fails elsewhere. */
2982 /* Create DMA map for this descriptor */
2983 err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT,
2984 &vpath->extra_dma_map);
/* Error unwind: destroy tags in reverse order of creation. */
2988 bus_dma_tag_destroy(vpath->dma_tag_rx);
2991 bus_dma_tag_destroy(vpath->dma_tag_tx);
/*
 * vxge_dma_mbuf_coalesce
 * Load an mbuf chain for Tx DMA; if the chain has too many segments,
 * defragment it into fewer mbufs and retry.  *m_headp is replaced by
 * the defragmented chain on success.
 */
2998 vxge_dma_mbuf_coalesce(bus_dma_tag_t dma_tag_tx, bus_dmamap_t dma_map,
2999 mbuf_t * m_headp, bus_dma_segment_t * dma_buffers,
3003 mbuf_t mbuf_pkt = NULL;
3006 err = bus_dmamap_load_mbuf_sg(dma_tag_tx, dma_map, *m_headp,
3007 dma_buffers, num_segs, BUS_DMA_NOWAIT);
3009 /* try to defrag, too many segments */
/* M_NOWAIT: may fail under memory pressure; caller must handle NULL. */
3010 mbuf_pkt = m_defrag(*m_headp, M_NOWAIT);
3011 if (mbuf_pkt == NULL) {
3015 *m_headp = mbuf_pkt;
/*
 * vxge_device_hw_info_get
 * Read the adapter hardware info (vpath mask, privileges, firmware
 * version) and decide whether a firmware upgrade is required/possible
 * based on the configured fw_option policy.
 */
3024 vxge_device_hw_info_get(vxge_dev_t *vdev)
3028 u32 max_supported_vpath = 0;
3030 vxge_firmware_upgrade_e fw_option;
3032 vxge_hal_status_e status = VXGE_HAL_OK;
3033 vxge_hal_device_hw_info_t *hw_info;
3035 status = vxge_hal_device_hw_info_get(vdev->pdev,
3036 (pci_reg_h) vdev->pdev->reg_map[0],
3037 (u8 *) vdev->pdev->bar_info[0],
3038 &vdev->config.hw_info);
3040 if (status != VXGE_HAL_OK)
3043 hw_info = &vdev->config.hw_info;
/* A zero vpath mask means the device exposes no usable paths. */
3045 vpath_mask = hw_info->vpath_mask;
3046 if (vpath_mask == 0) {
3047 device_printf(vdev->ndev, "No vpaths available in device\n");
3051 fw_option = vdev->config.fw_option;
3053 /* Check how many vpaths are available */
3054 for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
3055 if (!((vpath_mask) & mBIT(i)))
3057 max_supported_vpath++;
3060 vdev->max_supported_vpath = max_supported_vpath;
/* Privileged functions may perform mrpcim/srpcim operations. */
3061 status = vxge_hal_device_is_privileged(hw_info->host_type,
3063 vdev->is_privilaged = (status == VXGE_HAL_OK) ? TRUE : FALSE;
3065 vdev->hw_fw_version = VXGE_FW_VERSION(
3066 hw_info->fw_version.major,
3067 hw_info->fw_version.minor,
3068 hw_info->fw_version.build);
3071 VXGE_FW_MAJ_MIN_VERSION(hw_info->fw_version.major,
3072 hw_info->fw_version.minor);
/* Decide on firmware upgrade: forced, or version mismatch with driver. */
3074 if ((fw_option >= VXGE_FW_UPGRADE_FORCE) ||
3075 (vdev->hw_fw_version != VXGE_DRV_FW_VERSION)) {
3077 /* For fw_ver 1.8.1 and above ignore build number. */
3078 if ((fw_option == VXGE_FW_UPGRADE_ALL) &&
3079 ((vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 1)) &&
3080 (fw_ver_maj_min == VXGE_DRV_FW_MAJ_MIN_VERSION))) {
/* Firmware older than the base version cannot be upgraded in-driver. */
3084 if (vdev->hw_fw_version < VXGE_BASE_FW_VERSION) {
3085 device_printf(vdev->ndev,
3086 "Upgrade driver through vxge_update, "
3087 "Unable to load the driver.\n");
3090 vdev->fw_upgrade = TRUE;
3101 * vxge_device_hw_info_print
3102 * Print device and driver information
3105 vxge_device_hw_info_print(vxge_dev_t *vdev)
3109 struct sysctl_ctx_list *ctx;
3110 struct sysctl_oid_list *children;
3111 char pmd_type[2][VXGE_PMD_INFO_LEN];
3113 vxge_hal_device_t *hldev;
3114 vxge_hal_device_hw_info_t *hw_info;
3115 vxge_hal_device_pmd_info_t *pmd_port;
3120 ctx = device_get_sysctl_ctx(ndev);
3121 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ndev));
3123 hw_info = &(vdev->config.hw_info);
/*
 * Phase 1: format every attribute into vdev->config.nic_attr[] —
 * these buffers back both the console printout and the sysctl nodes.
 */
3125 snprintf(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION],
3126 sizeof(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]),
3127 "%d.%d.%d.%d", XGELL_VERSION_MAJOR, XGELL_VERSION_MINOR,
3128 XGELL_VERSION_FIX, XGELL_VERSION_BUILD);
3130 /* Print PCI-e bus type/speed/width info */
3131 snprintf(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO],
3132 sizeof(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]),
3133 "x%d", hldev->link_width);
3135 if (hldev->link_width <= VXGE_HAL_PCI_E_LINK_WIDTH_X4)
3136 device_printf(ndev, "For optimal performance a x8 "
3137 "PCI-Express slot is required.\n");
/* Strip trailing padding so the strings print cleanly. */
3139 vxge_null_terminate((char *) hw_info->serial_number,
3140 sizeof(hw_info->serial_number));
3142 vxge_null_terminate((char *) hw_info->part_number,
3143 sizeof(hw_info->part_number));
3145 snprintf(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO],
3146 sizeof(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]),
3147 "%s", hw_info->serial_number);
3149 snprintf(vdev->config.nic_attr[VXGE_PRINT_PART_NO],
3150 sizeof(vdev->config.nic_attr[VXGE_PRINT_PART_NO]),
3151 "%s", hw_info->part_number);
3153 snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION],
3154 sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]),
3155 "%s", hw_info->fw_version.version);
3157 snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_DATE],
3158 sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_DATE]),
3159 "%s", hw_info->fw_date.date);
/* Describe each PMD (optics) port; fall back to "??" placeholders. */
3161 pmd_port = &(hw_info->pmd_port0);
3162 for (i = 0; i < hw_info->ports; i++) {
3164 vxge_pmd_port_type_get(vdev, pmd_port->type,
3165 pmd_type[i], sizeof(pmd_type[i]));
3167 strncpy(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i],
3168 "vendor=??, sn=??, pn=??, type=??",
3169 sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]));
3171 vxge_null_terminate(pmd_port->vendor, sizeof(pmd_port->vendor));
/* Empty vendor string: keep the placeholder and move to port1. */
3172 if (strlen(pmd_port->vendor) == 0) {
3173 pmd_port = &(hw_info->pmd_port1);
3177 vxge_null_terminate(pmd_port->ser_num,
3178 sizeof(pmd_port->ser_num));
3180 vxge_null_terminate(pmd_port->part_num,
3181 sizeof(pmd_port->part_num));
3183 snprintf(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i],
3184 sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]),
3185 "vendor=%s, sn=%s, pn=%s, type=%s",
3186 pmd_port->vendor, pmd_port->ser_num,
3187 pmd_port->part_num, pmd_type[i]);
3189 pmd_port = &(hw_info->pmd_port1);
/* Human-readable function mode (SF/MF variants). */
3192 switch (hw_info->function_mode) {
3193 case VXGE_HAL_PCIE_FUNC_MODE_SF1_VP17:
3194 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3195 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3196 "%s %d %s", "Single Function - 1 function(s)",
3197 vdev->max_supported_vpath, "VPath(s)/function");
3200 case VXGE_HAL_PCIE_FUNC_MODE_MF2_VP8:
3201 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3202 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3203 "%s %d %s", "Multi Function - 2 function(s)",
3204 vdev->max_supported_vpath, "VPath(s)/function");
3207 case VXGE_HAL_PCIE_FUNC_MODE_MF4_VP4:
3208 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3209 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3210 "%s %d %s", "Multi Function - 4 function(s)",
3211 vdev->max_supported_vpath, "VPath(s)/function");
3214 case VXGE_HAL_PCIE_FUNC_MODE_MF8_VP2:
3215 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3216 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3217 "%s %d %s", "Multi Function - 8 function(s)",
3218 vdev->max_supported_vpath, "VPath(s)/function");
3221 case VXGE_HAL_PCIE_FUNC_MODE_MF8P_VP2:
3222 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3223 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3224 "%s %d %s", "Multi Function (DirectIO) - 8 function(s)",
3225 vdev->max_supported_vpath, "VPath(s)/function");
3229 snprintf(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE],
3230 sizeof(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]),
3231 "%s", ((vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) ?
3234 snprintf(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT],
3235 sizeof(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]),
3236 "%d", vdev->no_of_vpath);
3238 snprintf(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE],
3239 sizeof(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]),
3240 "%u", vdev->ifp->if_mtu);
3242 snprintf(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE],
3243 sizeof(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]),
3244 "%s", ((vdev->config.lro_enable) ? "Enabled" : "Disabled"));
3246 snprintf(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE],
3247 sizeof(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]),
3248 "%s", ((vdev->config.rth_enable) ? "Enabled" : "Disabled"));
3250 snprintf(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE],
3251 sizeof(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]),
3252 "%s", ((vdev->ifp->if_capenable & IFCAP_TSO4) ?
3253 "Enabled" : "Disabled"));
3255 snprintf(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE],
3256 sizeof(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]),
3257 "%s", ((hw_info->ports == 1) ? "Single Port" : "Dual Port"));
/* Port mode/failure/active-port attrs need privileged access. */
3259 if (vdev->is_privilaged) {
3261 if (hw_info->ports > 1) {
3263 snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE],
3264 sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]),
3265 "%s", vxge_port_mode[vdev->port_mode]);
3267 if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3268 snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE],
3269 sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]),
3270 "%s", vxge_port_failure[vdev->port_failure]);
3272 vxge_active_port_update(vdev);
3273 snprintf(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT],
3274 sizeof(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]),
3275 "%lld", vdev->active_port);
3278 if (!is_single_func(hw_info->function_mode)) {
3279 snprintf(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE],
3280 sizeof(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]),
3281 "%s", ((vdev->l2_switch) ? "Enabled" : "Disabled"));
/* Phase 2: emit the formatted attributes to the console. */
3285 device_printf(ndev, "Driver version\t: %s\n",
3286 vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]);
3288 device_printf(ndev, "Serial number\t: %s\n",
3289 vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]);
3291 device_printf(ndev, "Part number\t: %s\n",
3292 vdev->config.nic_attr[VXGE_PRINT_PART_NO]);
3294 device_printf(ndev, "Firmware version\t: %s\n",
3295 vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]);
3297 device_printf(ndev, "Firmware date\t: %s\n",
3298 vdev->config.nic_attr[VXGE_PRINT_FW_DATE]);
3300 device_printf(ndev, "Link width\t: %s\n",
3301 vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]);
3303 if (vdev->is_privilaged) {
3304 device_printf(ndev, "Function mode\t: %s\n",
3305 vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]);
3308 device_printf(ndev, "Interrupt type\t: %s\n",
3309 vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]);
3311 device_printf(ndev, "VPath(s) opened\t: %s\n",
3312 vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]);
3314 device_printf(ndev, "Adapter Type\t: %s\n",
3315 vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]);
3317 device_printf(ndev, "PMD Port 0\t: %s\n",
3318 vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0]);
3320 if (hw_info->ports > 1) {
3321 device_printf(ndev, "PMD Port 1\t: %s\n",
3322 vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1]);
3324 if (vdev->is_privilaged) {
3325 device_printf(ndev, "Port Mode\t: %s\n",
3326 vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]);
3328 if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3329 device_printf(ndev, "Port Failure\t: %s\n",
3330 vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]);
3332 device_printf(vdev->ndev, "Active Port\t: %s\n",
3333 vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]);
3337 if (vdev->is_privilaged && !is_single_func(hw_info->function_mode)) {
3338 device_printf(vdev->ndev, "L2 Switch\t: %s\n",
3339 vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]);
3342 device_printf(ndev, "MTU is %s\n",
3343 vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]);
3345 device_printf(ndev, "LRO %s\n",
3346 vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]);
3348 device_printf(ndev, "RTH %s\n",
3349 vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]);
3351 device_printf(ndev, "TSO %s\n",
3352 vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]);
/* Phase 3: expose the same strings as read-only sysctl nodes. */
3354 SYSCTL_ADD_STRING(ctx, children,
3355 OID_AUTO, "Driver version", CTLFLAG_RD,
3356 vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION],
3357 0, "Driver version");
3359 SYSCTL_ADD_STRING(ctx, children,
3360 OID_AUTO, "Serial number", CTLFLAG_RD,
3361 vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO],
3362 0, "Serial number");
3364 SYSCTL_ADD_STRING(ctx, children,
3365 OID_AUTO, "Part number", CTLFLAG_RD,
3366 vdev->config.nic_attr[VXGE_PRINT_PART_NO],
3369 SYSCTL_ADD_STRING(ctx, children,
3370 OID_AUTO, "Firmware version", CTLFLAG_RD,
3371 vdev->config.nic_attr[VXGE_PRINT_FW_VERSION],
3372 0, "Firmware version");
3374 SYSCTL_ADD_STRING(ctx, children,
3375 OID_AUTO, "Firmware date", CTLFLAG_RD,
3376 vdev->config.nic_attr[VXGE_PRINT_FW_DATE],
3377 0, "Firmware date");
3379 SYSCTL_ADD_STRING(ctx, children,
3380 OID_AUTO, "Link width", CTLFLAG_RD,
3381 vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO],
3384 if (vdev->is_privilaged) {
3385 SYSCTL_ADD_STRING(ctx, children,
3386 OID_AUTO, "Function mode", CTLFLAG_RD,
3387 vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3388 0, "Function mode");
3391 SYSCTL_ADD_STRING(ctx, children,
3392 OID_AUTO, "Interrupt type", CTLFLAG_RD,
3393 vdev->config.nic_attr[VXGE_PRINT_INTR_MODE],
3394 0, "Interrupt type");
3396 SYSCTL_ADD_STRING(ctx, children,
3397 OID_AUTO, "VPath(s) opened", CTLFLAG_RD,
3398 vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT],
3399 0, "VPath(s) opened");
3401 SYSCTL_ADD_STRING(ctx, children,
3402 OID_AUTO, "Adapter Type", CTLFLAG_RD,
3403 vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE],
3406 SYSCTL_ADD_STRING(ctx, children,
3407 OID_AUTO, "pmd port 0", CTLFLAG_RD,
3408 vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0],
3411 if (hw_info->ports > 1) {
3413 SYSCTL_ADD_STRING(ctx, children,
3414 OID_AUTO, "pmd port 1", CTLFLAG_RD,
3415 vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1],
3418 if (vdev->is_privilaged) {
3419 SYSCTL_ADD_STRING(ctx, children,
3420 OID_AUTO, "Port Mode", CTLFLAG_RD,
3421 vdev->config.nic_attr[VXGE_PRINT_PORT_MODE],
3424 if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3425 SYSCTL_ADD_STRING(ctx, children,
3426 OID_AUTO, "Port Failure", CTLFLAG_RD,
3427 vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE],
3430 SYSCTL_ADD_STRING(ctx, children,
3431 OID_AUTO, "L2 Switch", CTLFLAG_RD,
3432 vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE],
3437 SYSCTL_ADD_STRING(ctx, children,
3438 OID_AUTO, "LRO mode", CTLFLAG_RD,
3439 vdev->config.nic_attr[VXGE_PRINT_LRO_MODE],
3442 SYSCTL_ADD_STRING(ctx, children,
3443 OID_AUTO, "RTH mode", CTLFLAG_RD,
3444 vdev->config.nic_attr[VXGE_PRINT_RTH_MODE],
3447 SYSCTL_ADD_STRING(ctx, children,
3448 OID_AUTO, "TSO mode", CTLFLAG_RD,
3449 vdev->config.nic_attr[VXGE_PRINT_TSO_MODE],
/*
 * vxge_pmd_port_type_get
 * Map a HAL PMD port type to an ifmedia optics constant
 * (stored in vdev->ifm_optics) and a human-readable name
 * copied into ifm_name (at most ifm_len bytes, NUL-terminated).
 */
3454 vxge_pmd_port_type_get(vxge_dev_t *vdev, u32 port_type,
3455 char *ifm_name, u8 ifm_len)
/* Default when no case below matches the port type. */
3458 vdev->ifm_optics = IFM_UNKNOWN;
3460 switch (port_type) {
3461 case VXGE_HAL_DEVICE_PMD_TYPE_10G_SR:
3462 vdev->ifm_optics = IFM_10G_SR;
3463 strlcpy(ifm_name, "10GbE SR", ifm_len);
3466 case VXGE_HAL_DEVICE_PMD_TYPE_10G_LR:
3467 vdev->ifm_optics = IFM_10G_LR;
3468 strlcpy(ifm_name, "10GbE LR", ifm_len);
3471 case VXGE_HAL_DEVICE_PMD_TYPE_10G_LRM:
3472 vdev->ifm_optics = IFM_10G_LRM;
3473 strlcpy(ifm_name, "10GbE LRM", ifm_len);
3476 case VXGE_HAL_DEVICE_PMD_TYPE_10G_DIRECT:
3477 vdev->ifm_optics = IFM_10G_TWINAX;
3478 strlcpy(ifm_name, "10GbE DA (Direct Attached)", ifm_len);
3481 case VXGE_HAL_DEVICE_PMD_TYPE_10G_CX4:
3482 vdev->ifm_optics = IFM_10G_CX4;
3483 strlcpy(ifm_name, "10GbE CX4", ifm_len);
3486 case VXGE_HAL_DEVICE_PMD_TYPE_10G_BASE_T:
/* IFM_10G_T only exists on FreeBSD 8.0+; older builds keep IFM_UNKNOWN. */
3487 #if __FreeBSD_version >= 800000
3488 vdev->ifm_optics = IFM_10G_T;
3490 strlcpy(ifm_name, "10GbE baseT", ifm_len);
3493 case VXGE_HAL_DEVICE_PMD_TYPE_10G_OTHER:
3494 strlcpy(ifm_name, "10GbE Other", ifm_len);
3497 case VXGE_HAL_DEVICE_PMD_TYPE_1G_SX:
3498 vdev->ifm_optics = IFM_1000_SX;
3499 strlcpy(ifm_name, "1GbE SX", ifm_len);
3502 case VXGE_HAL_DEVICE_PMD_TYPE_1G_LX:
3503 vdev->ifm_optics = IFM_1000_LX;
3504 strlcpy(ifm_name, "1GbE LX", ifm_len);
3507 case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX:
3508 vdev->ifm_optics = IFM_1000_CX;
3509 strlcpy(ifm_name, "1GbE CX", ifm_len);
3512 case VXGE_HAL_DEVICE_PMD_TYPE_1G_BASE_T:
3513 vdev->ifm_optics = IFM_1000_T;
3514 strlcpy(ifm_name, "1GbE baseT", ifm_len);
3517 case VXGE_HAL_DEVICE_PMD_TYPE_1G_DIRECT:
3518 strlcpy(ifm_name, "1GbE DA (Direct Attached)",
3522 case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX4:
3523 strlcpy(ifm_name, "1GbE CX4", ifm_len);
3526 case VXGE_HAL_DEVICE_PMD_TYPE_1G_OTHER:
3527 strlcpy(ifm_name, "1GbE Other", ifm_len);
3531 case VXGE_HAL_DEVICE_PMD_TYPE_UNKNOWN:
3532 strlcpy(ifm_name, "UNSUP", ifm_len);
/*
 * vxge_ring_length_get
 * Ring length = default block count times the number of Rx
 * descriptors a block holds for the given buffer mode.
 */
3538 vxge_ring_length_get(u32 buffer_mode)
3540 return (VXGE_DEFAULT_RING_BLOCK *
3541 vxge_hal_ring_rxds_per_block_get(buffer_mode));
3545 * Removes trailing spaces padded
3546 * and NULL terminates strings
3549 vxge_null_terminate(char *str, size_t len)
/* Scan forward until NUL, a space, or len bytes have been examined. */
3552 while (*str && (*str != ' ') && (len != 0))
3562 * Callback to control the device
3565 vxge_ioctl(ifnet_t ifp, u_long command, caddr_t data)
3568 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
3569 struct ifreq *ifr = (struct ifreq *) data;
/* Ignore ioctls once the device has been torn down. */
3571 if (!vdev->is_active)
3575 /* Set/Get ifnet address */
3578 ether_ioctl(ifp, command, data);
3581 /* Set Interface MTU */
3583 err = vxge_change_mtu(vdev, (unsigned long)ifr->ifr_mtu);
3586 /* Set Interface Flags */
3588 VXGE_DRV_LOCK(vdev);
3589 if (ifp->if_flags & IFF_UP) {
3590 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only re-program promisc/allmulti if those bits actually changed. */
3591 if ((ifp->if_flags ^ vdev->if_flags) &
3592 (IFF_PROMISC | IFF_ALLMULTI))
3593 vxge_promisc_set(vdev);
3595 vxge_init_locked(vdev);
3598 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3599 vxge_stop_locked(vdev);
/* Remember flags so the next SIOCSIFFLAGS can diff against them. */
3601 vdev->if_flags = ifp->if_flags;
3602 VXGE_DRV_UNLOCK(vdev);
3605 /* Add/delete multicast address */
3610 /* Get/Set Interface Media */
3613 err = ifmedia_ioctl(ifp, ifr, &vdev->media, command);
3616 /* Set Capabilities */
3618 VXGE_DRV_LOCK(vdev);
3619 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3621 if (mask & IFCAP_TXCSUM) {
3622 ifp->if_capenable ^= IFCAP_TXCSUM;
3623 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
/* TSO depends on Tx checksum offload; drop TSO if TXCSUM went away. */
3625 if ((ifp->if_capenable & IFCAP_TSO) &&
3626 !(ifp->if_capenable & IFCAP_TXCSUM)) {
3628 ifp->if_capenable &= ~IFCAP_TSO;
3629 ifp->if_hwassist &= ~CSUM_TSO;
3630 if_printf(ifp, "TSO Disabled\n");
3633 if (mask & IFCAP_RXCSUM)
3634 ifp->if_capenable ^= IFCAP_RXCSUM;
3636 if (mask & IFCAP_TSO4) {
3637 ifp->if_capenable ^= IFCAP_TSO4;
3639 if (ifp->if_capenable & IFCAP_TSO) {
3640 if (ifp->if_capenable & IFCAP_TXCSUM) {
3641 ifp->if_hwassist |= CSUM_TSO;
3642 if_printf(ifp, "TSO Enabled\n");
3644 ifp->if_capenable &= ~IFCAP_TSO;
3645 ifp->if_hwassist &= ~CSUM_TSO;
3647 "Enable tx checksum offload \
3652 ifp->if_hwassist &= ~CSUM_TSO;
3653 if_printf(ifp, "TSO Disabled\n");
3656 if (mask & IFCAP_LRO)
3657 ifp->if_capenable ^= IFCAP_LRO;
3659 if (mask & IFCAP_VLAN_HWTAGGING)
3660 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3662 if (mask & IFCAP_VLAN_MTU)
3663 ifp->if_capenable ^= IFCAP_VLAN_MTU;
3665 if (mask & IFCAP_VLAN_HWCSUM)
3666 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
3668 #if __FreeBSD_version >= 800000
3669 if (mask & IFCAP_VLAN_HWTSO)
3670 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3673 #if defined(VLAN_CAPABILITIES)
3674 VLAN_CAPABILITIES(ifp);
3677 VXGE_DRV_UNLOCK(vdev);
/* Driver-private ioctls: statistics and register dumps. */
3680 case SIOCGPRIVATE_0:
3681 VXGE_DRV_LOCK(vdev);
3682 err = vxge_ioctl_stats(vdev, ifr);
3683 VXGE_DRV_UNLOCK(vdev);
3686 case SIOCGPRIVATE_1:
3687 VXGE_DRV_LOCK(vdev);
3688 err = vxge_ioctl_regs(vdev, ifr);
3689 VXGE_DRV_UNLOCK(vdev);
/* Everything else goes to the generic Ethernet handler. */
3693 err = ether_ioctl(ifp, command, data);
3702 * IOCTL to get registers
3705 vxge_ioctl_regs(vxge_dev_t *vdev, struct ifreq *ifr)
3709 u32 offset, reqd_size = 0;
3710 int i, err = EINVAL;
/*
 * NOTE(review): both pointers alias the same user buffer; the first
 * word selects the register bank, the second (for per-vpath banks)
 * selects the vpath index — confirm against the companion tool.
 */
3712 char *command = ifr_data_get_ptr(ifr);
3713 void *reg_info = ifr_data_get_ptr(ifr);
3715 vxge_vpath_t *vpath;
3716 vxge_hal_status_e status = VXGE_HAL_OK;
3717 vxge_hal_mgmt_reg_type_e regs_type;
/* Privileged banks are only readable on the privileged function. */
3720 case vxge_hal_mgmt_reg_type_pcicfgmgmt:
3721 if (vdev->is_privilaged) {
3722 reqd_size = sizeof(vxge_hal_pcicfgmgmt_reg_t);
3723 regs_type = vxge_hal_mgmt_reg_type_pcicfgmgmt;
3727 case vxge_hal_mgmt_reg_type_mrpcim:
3728 if (vdev->is_privilaged) {
3729 reqd_size = sizeof(vxge_hal_mrpcim_reg_t);
3730 regs_type = vxge_hal_mgmt_reg_type_mrpcim;
3734 case vxge_hal_mgmt_reg_type_srpcim:
3735 if (vdev->is_privilaged) {
3736 reqd_size = sizeof(vxge_hal_srpcim_reg_t);
3737 regs_type = vxge_hal_mgmt_reg_type_srpcim;
3741 case vxge_hal_mgmt_reg_type_memrepair:
3742 if (vdev->is_privilaged) {
3743 /* reqd_size = sizeof(vxge_hal_memrepair_reg_t); */
3744 regs_type = vxge_hal_mgmt_reg_type_memrepair;
3748 case vxge_hal_mgmt_reg_type_legacy:
3749 reqd_size = sizeof(vxge_hal_legacy_reg_t);
3750 regs_type = vxge_hal_mgmt_reg_type_legacy;
3753 case vxge_hal_mgmt_reg_type_toc:
3754 reqd_size = sizeof(vxge_hal_toc_reg_t);
3755 regs_type = vxge_hal_mgmt_reg_type_toc;
3758 case vxge_hal_mgmt_reg_type_common:
3759 reqd_size = sizeof(vxge_hal_common_reg_t);
3760 regs_type = vxge_hal_mgmt_reg_type_common;
/* Per-vpath banks: second u32 in the request selects the vpath. */
3763 case vxge_hal_mgmt_reg_type_vpmgmt:
3764 reqd_size = sizeof(vxge_hal_vpmgmt_reg_t);
3765 regs_type = vxge_hal_mgmt_reg_type_vpmgmt;
3766 vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]);
3767 vp_id = vpath->vp_id;
3770 case vxge_hal_mgmt_reg_type_vpath:
3771 reqd_size = sizeof(vxge_hal_vpath_reg_t);
3772 regs_type = vxge_hal_mgmt_reg_type_vpath;
3773 vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]);
3774 vp_id = vpath->vp_id;
3777 case VXGE_GET_VPATH_COUNT:
3778 *((u32 *) reg_info) = vdev->no_of_vpath;
/* Read the whole bank 8 bytes at a time into the user buffer. */
3788 for (i = 0, offset = 0; offset < reqd_size;
3789 i++, offset += 0x0008) {
3791 status = vxge_hal_mgmt_reg_read(vdev->devh, regs_type,
3792 vp_id, offset, &value);
3794 err = (status != VXGE_HAL_OK) ? EINVAL : 0;
3798 *((u64 *) ((u64 *) reg_info + i)) = value;
3806 * IOCTL to get statistics
3809 vxge_ioctl_stats(vxge_dev_t *vdev, struct ifreq *ifr)
3811 int i, retsize, err = EINVAL;
3814 vxge_vpath_t *vpath;
3815 vxge_bw_info_t *bw_info;
3816 vxge_port_info_t *port_info;
3817 vxge_drv_stats_t *drv_stat;
3819 char *buffer = NULL;
/* First word of the user buffer selects the sub-command. */
3820 char *command = ifr_data_get_ptr(ifr);
3821 vxge_hal_status_e status = VXGE_HAL_OK;
/* PCI config space dump, formatted by the HAL aux helper. */
3824 case VXGE_GET_PCI_CONF:
3825 bufsize = VXGE_STATS_BUFFER_SIZE;
3826 buffer = (char *) vxge_mem_alloc(bufsize);
3827 if (buffer != NULL) {
3828 status = vxge_hal_aux_pci_config_read(vdev->devh,
3829 bufsize, buffer, &retsize);
3830 if (status == VXGE_HAL_OK)
3831 err = copyout(buffer, ifr_data_get_ptr(ifr),
3834 device_printf(vdev->ndev,
3835 "failed pciconfig statistics query\n");
3837 vxge_mem_free(buffer, bufsize);
/* mrpcim statistics require the privileged function. */
3841 case VXGE_GET_MRPCIM_STATS:
3842 if (!vdev->is_privilaged)
3845 bufsize = VXGE_STATS_BUFFER_SIZE;
3846 buffer = (char *) vxge_mem_alloc(bufsize);
3847 if (buffer != NULL) {
3848 status = vxge_hal_aux_stats_mrpcim_read(vdev->devh,
3849 bufsize, buffer, &retsize);
3850 if (status == VXGE_HAL_OK)
3851 err = copyout(buffer, ifr_data_get_ptr(ifr),
3854 device_printf(vdev->ndev,
3855 "failed mrpcim statistics query\n");
3857 vxge_mem_free(buffer, bufsize);
3861 case VXGE_GET_DEVICE_STATS:
3862 bufsize = VXGE_STATS_BUFFER_SIZE;
3863 buffer = (char *) vxge_mem_alloc(bufsize);
3864 if (buffer != NULL) {
3865 status = vxge_hal_aux_stats_device_read(vdev->devh,
3866 bufsize, buffer, &retsize);
3867 if (status == VXGE_HAL_OK)
3868 err = copyout(buffer, ifr_data_get_ptr(ifr),
3871 device_printf(vdev->ndev,
3872 "failed device statistics query\n");
3874 vxge_mem_free(buffer, bufsize);
/* Copy out cached hw info plus current port mode/failure settings. */
3878 case VXGE_GET_DEVICE_HWINFO:
3879 bufsize = sizeof(vxge_device_hw_info_t);
3880 buffer = (char *) vxge_mem_alloc(bufsize);
3881 if (buffer != NULL) {
3883 &(((vxge_device_hw_info_t *) buffer)->hw_info),
3884 &vdev->config.hw_info,
3885 sizeof(vxge_hal_device_hw_info_t));
3887 ((vxge_device_hw_info_t *) buffer)->port_mode =
3890 ((vxge_device_hw_info_t *) buffer)->port_failure =
3893 err = copyout(buffer, ifr_data_get_ptr(ifr), bufsize);
3895 device_printf(vdev->ndev,
3896 "failed device hardware info query\n");
3898 vxge_mem_free(buffer, bufsize);
/* Per-vpath software counters, with LRO counts folded in. */
3902 case VXGE_GET_DRIVER_STATS:
3903 bufsize = sizeof(vxge_drv_stats_t) * vdev->no_of_vpath;
3904 drv_stat = (vxge_drv_stats_t *) vxge_mem_alloc(bufsize);
3905 if (drv_stat != NULL) {
3906 for (i = 0; i < vdev->no_of_vpath; i++) {
3907 vpath = &(vdev->vpaths[i]);
3909 vpath->driver_stats.rx_lro_queued +=
3910 vpath->lro.lro_queued;
3912 vpath->driver_stats.rx_lro_flushed +=
3913 vpath->lro.lro_flushed;
3915 vxge_os_memcpy(&drv_stat[i],
3916 &(vpath->driver_stats),
3917 sizeof(vxge_drv_stats_t));
3920 err = copyout(drv_stat, ifr_data_get_ptr(ifr), bufsize);
3922 device_printf(vdev->ndev,
3923 "failed driver statistics query\n");
3925 vxge_mem_free(drv_stat, bufsize);
3929 case VXGE_GET_BANDWIDTH:
3930 bw_info = ifr_data_get_ptr(ifr);
/* Non-zero func_id needs firmware 1.8.0+ for bandwidth queries. */
3932 if ((vdev->config.hw_info.func_id != 0) &&
3933 (vdev->hw_fw_version < VXGE_FW_VERSION(1, 8, 0)))
3936 if (vdev->config.hw_info.func_id != 0)
3937 bw_info->func_id = vdev->config.hw_info.func_id;
3939 status = vxge_bw_priority_get(vdev, bw_info);
3940 if (status != VXGE_HAL_OK)
3943 err = copyout(bw_info, ifr_data_get_ptr(ifr),
3944 sizeof(vxge_bw_info_t));
3947 case VXGE_SET_BANDWIDTH:
3948 if (vdev->is_privilaged)
3949 err = vxge_bw_priority_set(vdev, ifr);
/* Port mode changes take effect only after a power cycle. */
3952 case VXGE_SET_PORT_MODE:
3953 if (vdev->is_privilaged) {
3954 if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
3955 port_info = ifr_data_get_ptr(ifr);
3956 vdev->config.port_mode = port_info->port_mode;
3957 err = vxge_port_mode_update(vdev);
3959 err = VXGE_HAL_FAIL;
3962 device_printf(vdev->ndev,
3963 "PLEASE POWER CYCLE THE SYSTEM\n");
3969 case VXGE_GET_PORT_MODE:
3970 if (vdev->is_privilaged) {
3971 if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
3972 port_info = ifr_data_get_ptr(ifr);
3973 err = vxge_port_mode_get(vdev, port_info);
3974 if (err == VXGE_HAL_OK) {
3975 err = copyout(port_info,
3976 ifr_data_get_ptr(ifr),
3977 sizeof(vxge_port_info_t));
/*
 * vxge_bw_priority_config
 * Initialize bandwidth/priority settings for every function on the
 * adapter by running the per-function update with binit == TRUE.
 */
3991 vxge_bw_priority_config(vxge_dev_t *vdev)
3996 for (i = 0; i < vdev->no_of_func; i++) {
3997 err = vxge_bw_priority_update(vdev, i, TRUE);
/*
 * vxge_bw_priority_set
 * ioctl backend: copy the requested bandwidth/priority for one
 * function into the driver config and push it to the hardware.
 */
4006 vxge_bw_priority_set(vxge_dev_t *vdev, struct ifreq *ifr)
4010 vxge_bw_info_t *bw_info;
/*
 * NOTE(review): func_id comes from userland and indexes
 * config.bw_info[] without a visible range check — verify the
 * caller or this path bounds it.
 */
4012 bw_info = ifr_data_get_ptr(ifr);
4013 func_id = bw_info->func_id;
4015 vdev->config.bw_info[func_id].priority = bw_info->priority;
4016 vdev->config.bw_info[func_id].bandwidth = bw_info->bandwidth;
4018 err = vxge_bw_priority_update(vdev, func_id, FALSE);
/*
 * vxge_bw_priority_update
 * Apply the configured bandwidth limit and priority of one function
 * to each vpath owned by that function, then program the Rx (and,
 * where supported, Tx) bandwidth/priority in hardware.
 * binit == TRUE marks the initial configuration pass.
 * Returns 0 on success, EINVAL on any HAL failure.
 */
4024 vxge_bw_priority_update(vxge_dev_t *vdev, u32 func_id, bool binit)
4027 u32 bandwidth, priority, vpath_count;
4028 u64 vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];
4030 vxge_hal_device_t *hldev;
4031 vxge_hal_vp_config_t *vp_config;
4032 vxge_hal_status_e status = VXGE_HAL_OK;
/* Which vpaths belong to this function? */
4036 status = vxge_hal_get_vpath_list(vdev->devh, func_id,
4037 vpath_list, &vpath_count);
4039 if (status != VXGE_HAL_OK)
4042 for (i = 0; i < vpath_count; i++) {
4043 vp_config = &(hldev->config.vp_config[vpath_list[i]]);
4045 /* Configure Bandwidth */
4046 if (vdev->config.bw_info[func_id].bandwidth !=
4047 VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) {
/* Out-of-range requests fall back to the default (no limit). */
4050 bandwidth = vdev->config.bw_info[func_id].bandwidth;
4051 if (bandwidth < VXGE_HAL_VPATH_BW_LIMIT_MIN ||
4052 bandwidth > VXGE_HAL_VPATH_BW_LIMIT_MAX) {
4054 bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT;
4056 vp_config->bandwidth = bandwidth;
4060 * If b/w limiting is enabled on any of the
4061 * VFs, then for remaining VFs set the priority to 3
4062 * and b/w limiting to max i.e 10 Gb)
4064 if (vp_config->bandwidth == VXGE_HAL_VPATH_BW_LIMIT_DEFAULT)
4065 vp_config->bandwidth = VXGE_HAL_VPATH_BW_LIMIT_MAX;
/* Low-latency tuning raises this function's priority at init. */
4067 if (binit && vdev->config.low_latency) {
4069 vdev->config.bw_info[func_id].priority =
4070 VXGE_DEFAULT_VPATH_PRIORITY_HIGH;
4073 /* Configure Priority */
4074 if (vdev->config.bw_info[func_id].priority !=
4075 VXGE_HAL_VPATH_PRIORITY_DEFAULT) {
4078 priority = vdev->config.bw_info[func_id].priority;
4079 if (priority < VXGE_HAL_VPATH_PRIORITY_MIN ||
4080 priority > VXGE_HAL_VPATH_PRIORITY_MAX) {
4082 priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
4084 vp_config->priority = priority;
4086 } else if (vdev->config.low_latency) {
4088 vp_config->priority = VXGE_DEFAULT_VPATH_PRIORITY_LOW;
/* Push Rx settings; Tx only for vpaths below the Tx-capable limit. */
4092 status = vxge_hal_rx_bw_priority_set(vdev->devh,
4094 if (status != VXGE_HAL_OK)
4097 if (vpath_list[i] < VXGE_HAL_TX_BW_VPATH_LIMIT) {
4098 status = vxge_hal_tx_bw_priority_set(
4099 vdev->devh, vpath_list[i]);
4100 if (status != VXGE_HAL_OK)
4106 return ((status == VXGE_HAL_OK) ? 0 : EINVAL);
4110 * vxge_intr_coalesce_tx
4111 * Changes interrupt coalescing if the interrupts are not within a range
4112 * Return Value: Nothing
4115 vxge_intr_coalesce_tx(vxge_vpath_t *vpath)
/* Adaptive coalescing must be enabled for this vpath. */
4119 if (!vpath->tx_intr_coalesce)
4122 vpath->tx_interrupts++;
/* Evaluate the interrupt rate once per ~10 ms (hz/100 ticks). */
4123 if (ticks > vpath->tx_ticks + hz/100) {
4125 vpath->tx_ticks = ticks;
4126 timer = vpath->tti_rtimer_val;
/* Too many interrupts in the window: raise the restriction timer. */
4127 if (vpath->tx_interrupts > VXGE_MAX_TX_INTERRUPT_COUNT) {
4128 if (timer != VXGE_TTI_RTIMER_ADAPT_VAL) {
4129 vpath->tti_rtimer_val =
4130 VXGE_TTI_RTIMER_ADAPT_VAL;
4132 vxge_hal_vpath_dynamic_tti_rtimer_set(
4133 vpath->handle, vpath->tti_rtimer_val);
/* Rate back to normal: clear the restriction timer. */
4137 vpath->tti_rtimer_val = 0;
4138 vxge_hal_vpath_dynamic_tti_rtimer_set(
4139 vpath->handle, vpath->tti_rtimer_val);
/* Start a fresh counting window. */
4142 vpath->tx_interrupts = 0;
4147 * vxge_intr_coalesce_rx
4148 * Changes interrupt coalescing if the interrupts are not within a range
4149 * Return Value: Nothing
4152 vxge_intr_coalesce_rx(vxge_vpath_t *vpath)
/* Rx twin of vxge_intr_coalesce_tx: same window/threshold scheme. */
4156 if (!vpath->rx_intr_coalesce)
4159 vpath->rx_interrupts++;
/* Evaluate the interrupt rate once per ~10 ms (hz/100 ticks). */
4160 if (ticks > vpath->rx_ticks + hz/100) {
4162 vpath->rx_ticks = ticks;
4163 timer = vpath->rti_rtimer_val;
/* Too many interrupts in the window: raise the restriction timer. */
4165 if (vpath->rx_interrupts > VXGE_MAX_RX_INTERRUPT_COUNT) {
4166 if (timer != VXGE_RTI_RTIMER_ADAPT_VAL) {
4167 vpath->rti_rtimer_val =
4168 VXGE_RTI_RTIMER_ADAPT_VAL;
4170 vxge_hal_vpath_dynamic_rti_rtimer_set(
4171 vpath->handle, vpath->rti_rtimer_val);
/* Rate back to normal: clear the restriction timer. */
4175 vpath->rti_rtimer_val = 0;
4176 vxge_hal_vpath_dynamic_rti_rtimer_set(
4177 vpath->handle, vpath->rti_rtimer_val);
4180 vpath->rx_interrupts = 0;
4185 * vxge_methods FreeBSD device interface entry points
4187 static device_method_t vxge_methods[] = {
4188 DEVMETHOD(device_probe, vxge_probe),
4189 DEVMETHOD(device_attach, vxge_attach),
4190 DEVMETHOD(device_detach, vxge_detach),
4191 DEVMETHOD(device_shutdown, vxge_shutdown),
/* Driver descriptor: name, method table, softc size. */
4196 static driver_t vxge_driver = {
4197 "vxge", vxge_methods, sizeof(vxge_dev_t),
4200 static devclass_t vxge_devclass;
/* Register the driver on the PCI bus. */
4202 DRIVER_MODULE(vxge, pci, vxge_driver, vxge_devclass, 0, 0);