1 /******************************************************************************
3 Copyright (c) 2013-2020, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
/*
 * ixl_configure_tx_itr - program the TX interrupt throttle rate (ITR)
 * for every TX queue of the PF's VSI.
 *
 * Copies the PF-wide tx_itr value into the VSI, then for each TX queue
 * writes the per-vector ITR register and caches the setting plus a
 * default latency class in the ring's software state.
 *
 * NOTE(review): this chunk is truncated — the return type, braces and the
 * value argument of the wr32() call are not visible here.
 */
38 ixl_configure_tx_itr(struct ixl_pf *pf)
40 struct i40e_hw *hw = &pf->hw;
41 struct ixl_vsi *vsi = &pf->vsi;
42 struct ixl_tx_queue *que = vsi->tx_queues;
44 vsi->tx_itr_setting = pf->tx_itr;
46 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
47 struct tx_ring *txr = &que->txr;
/* Program the hardware ITR register for this queue's TX cause */
49 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
51 txr->itr = vsi->tx_itr_setting;
52 txr->latency = IXL_AVE_LATENCY;
/*
 * ixl_configure_rx_itr - program the RX interrupt throttle rate (ITR)
 * for every RX queue of the PF's VSI.
 *
 * Mirror image of ixl_configure_tx_itr(): propagate the PF-wide rx_itr
 * value into the VSI, write each queue's ITR register, and cache the
 * value and default latency class in the ring state.
 *
 * NOTE(review): truncated chunk — return type, braces and the wr32()
 * value argument are not visible here.
 */
57 ixl_configure_rx_itr(struct ixl_pf *pf)
59 struct i40e_hw *hw = &pf->hw;
60 struct ixl_vsi *vsi = &pf->vsi;
61 struct ixl_rx_queue *que = vsi->rx_queues;
63 vsi->rx_itr_setting = pf->rx_itr;
65 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
66 struct rx_ring *rxr = &que->rxr;
/* Program the hardware ITR register for this queue's RX cause */
68 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
70 rxr->itr = vsi->rx_itr_setting;
71 rxr->latency = IXL_AVE_LATENCY;
/*
 * Legacy/MSI (single-vector) interrupt service routine — presumably
 * ixl_intr(); the function signature is not visible in this chunk.
 *
 * Reads ICR0 to learn the interrupt cause, defers VFLR and Admin Queue
 * work to iflib taskqueues, re-enables the "other" interrupt cause, and
 * asks iflib to schedule the RX thread only when queue 0 raised the
 * interrupt.
 */
78 struct ixl_pf *pf = arg;
79 struct i40e_hw *hw = &pf->hw;
80 struct ixl_vsi *vsi = &pf->vsi;
81 struct ixl_rx_queue *que = vsi->rx_queues;
86 /* Clear PBA at start of ISR if using legacy interrupts */
87 if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
88 wr32(hw, I40E_PFINT_DYN_CTL0,
89 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
90 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
/* Latch and examine the interrupt cause register */
92 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* VF reset (VFLR): hand off to the iflib IOV task */
96 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
97 iflib_iov_intr_deferred(vsi->ctx);
/* Admin Queue event: hand off to the iflib admin task */
100 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
101 iflib_admin_intr_deferred(vsi->ctx);
103 ixl_enable_intr0(hw);
/* Only schedule the RX thread when the data queue fired */
105 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
106 return (FILTER_SCHEDULE_THREAD);
108 return (FILTER_HANDLED);
111 /*********************************************************************
113 * MSI-X VSI Interrupt Service routine
115 **********************************************************************/
/*
 * ixl_msix_que - MSI-X interrupt handler for a single queue pair.
 *
 * Updates the queue's dynamic RX interrupt moderation, then tells iflib
 * to run the deferred processing thread.
 *
 * NOTE(review): truncated chunk — return type and braces are not visible.
 */
117 ixl_msix_que(void *arg)
119 struct ixl_rx_queue *rx_que = arg;
123 ixl_set_queue_rx_itr(rx_que);
125 return (FILTER_SCHEDULE_THREAD);
128 /*********************************************************************
130 * MSI-X Admin Queue Interrupt Service routine
132 **********************************************************************/
/*
 * ixl_msix_adminq - MSI-X interrupt handler for the Admin Queue vector
 * ("other" causes: AQ events, malicious-driver detect, global resets,
 * critical hardware errors, HMC errors, VFLR).
 *
 * Causes that require deferred handling are masked off in ICR0_ENA before
 * interrupts are re-enabled; the corresponding state bits are set on the
 * PF so the admin task can process them.  Returns FILTER_SCHEDULE_THREAD
 * when a task was queued, FILTER_HANDLED otherwise.
 *
 * NOTE(review): truncated chunk — the switch statement head for the reset
 * type, several closing braces and the do_task assignments are missing
 * from the visible text.
 */
134 ixl_msix_adminq(void *arg)
136 struct ixl_pf *pf = arg;
137 struct i40e_hw *hw = &pf->hw;
138 device_t dev = pf->dev;
139 u32 reg, mask, rstat_reg;
140 bool do_task = FALSE;
142 DDPRINTF(dev, "begin");
146 reg = rd32(hw, I40E_PFINT_ICR0);
148 * For masking off interrupt causes that need to be handled before
149 * they can be re-enabled
151 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
153 /* Check on the cause */
154 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
155 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious driver detection: flag for the admin task */
159 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
160 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
161 atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
/* Global reset requested: decode the reset type for logging */
165 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
166 const char *reset_type;
167 mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
168 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
169 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
170 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
172 /* These others might be handled similarly to an EMPR reset */
173 case I40E_RESET_CORER:
174 reset_type = "CORER";
176 case I40E_RESET_GLOBR:
177 reset_type = "GLOBR";
179 case I40E_RESET_EMPR:
186 device_printf(dev, "Reset Requested! (%s)\n", reset_type);
187 /* overload admin queue task to check reset progress */
188 atomic_set_int(&pf->state, IXL_PF_STATE_RESETTING);
193 * PE / PCI / ECC exceptions are all handled in the same way:
194 * mask out these three causes, then request a PF reset
196 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
197 device_printf(dev, "ECC Error detected!\n");
198 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
199 device_printf(dev, "PCI Exception detected!\n");
200 if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
201 device_printf(dev, "Critical Protocol Engine Error detected!\n");
202 /* Checks against the conditions above */
203 if (reg & IXL_ICR0_CRIT_ERR_MASK) {
204 mask &= ~IXL_ICR0_CRIT_ERR_MASK;
205 atomic_set_32(&pf->state,
206 IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
/* HMC errors: log and clear; note 'reg' is reused for the HMC regs here */
210 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
211 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
212 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
213 device_printf(dev, "HMC Error detected!\n");
214 device_printf(dev, "INFO 0x%08x\n", reg);
215 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
216 device_printf(dev, "DATA 0x%08x\n", reg);
217 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
/* VF reset: defer to the iflib IOV task */
222 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
223 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
224 iflib_iov_intr_deferred(pf->vsi.ctx);
/* Re-arm with the still-enabled causes, then re-enable intr0 */
228 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
229 ixl_enable_intr0(hw);
232 return (FILTER_SCHEDULE_THREAD);
234 return (FILTER_HANDLED);
238 * Configure queue interrupt cause registers in hardware.
240 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
/*
 * ixl_configure_queue_intr_msix - program per-queue interrupt cause
 * registers for MSI-X operation.
 *
 * For each queue pair, builds the hardware interrupt linked list:
 * LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL, so each MSI-X vector
 * services one RX/TX queue pair.
 *
 * NOTE(review): truncated chunk — the declarations of 'vector' and 'reg'
 * and the function's braces are not visible; 'vector' presumably starts
 * at 1 since vector 0 is the admin queue.
 */
243 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
245 struct i40e_hw *hw = &pf->hw;
246 struct ixl_vsi *vsi = &pf->vsi;
250 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
251 /* Make sure interrupt is disabled */
252 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
253 /* Set linked list head to point to corresponding RX queue
254 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
255 reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
256 & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
257 ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
258 & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
259 wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
/* RX cause: enable, bind to this vector, chain to the TX queue next */
261 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
262 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
263 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
264 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
265 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
266 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: enable, bind to this vector, terminate the list (EOL) */
268 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
269 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
270 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
271 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
272 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
273 wr32(hw, I40E_QINT_TQCTL(i), reg);
278 * Configure for single interrupt vector operation
/*
 * ixl_configure_legacy - program interrupt registers for legacy/MSI mode,
 * where a single vector services both the "other" causes and queue pair 0.
 *
 * Enables all non-queue ("other") interrupt causes, disables throttling
 * on them, points the interrupt linked list at queue pair 0, and enables
 * the RX and TX causes for that pair.
 *
 * NOTE(review): truncated chunk — the declaration of 'reg', braces, and
 * any trailing statements are not visible here.
 */
281 ixl_configure_legacy(struct ixl_pf *pf)
283 struct i40e_hw *hw = &pf->hw;
284 struct ixl_vsi *vsi = &pf->vsi;
287 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
289 /* Setup "other" causes */
290 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
291 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
292 | I40E_PFINT_ICR0_ENA_GRST_MASK
293 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
294 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
295 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
296 | I40E_PFINT_ICR0_ENA_VFLR_MASK
297 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
299 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
301 /* No ITR for non-queue interrupts */
302 wr32(hw, I40E_PFINT_STAT_CTL0,
303 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
305 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
306 wr32(hw, I40E_PFINT_LNKLST0, 0);
308 /* Associate the queue pair to the vector and enable the q int */
309 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
310 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
311 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
312 wr32(hw, I40E_QINT_RQCTL(0), reg);
314 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
315 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
316 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
317 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * ixl_free_pci_resources - release interrupt and PCI memory resources.
 *
 * Frees the admin-queue IRQ, each RX queue's MSI-X IRQ, and the BAR
 * memory mapping.  Safe to call early in teardown: the "stations" guard
 * (not fully visible here) protects against partially set-up state.
 */
321 ixl_free_pci_resources(struct ixl_pf *pf)
323 struct ixl_vsi *vsi = &pf->vsi;
324 device_t dev = iflib_get_dev(vsi->ctx);
325 struct ixl_rx_queue *rx_que = vsi->rx_queues;
327 /* We may get here before stations are set up */
332 ** Release all MSI-X VSI resources:
334 iflib_irq_free(vsi->ctx, &vsi->irq);
336 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
337 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
/* Release the BAR mapping acquired at attach */
339 if (pf->pci_mem != NULL)
340 bus_release_resource(dev, SYS_RES_MEMORY,
341 rman_get_rid(pf->pci_mem), pf->pci_mem);
344 /*********************************************************************
346 * Setup networking device structure and register an interface.
348 **********************************************************************/
/*
 * ixl_setup_interface - populate the network interface structure.
 *
 * Sets the maximum frame size from the MTU, queries the PHY for
 * supported media/speeds (retrying once after a delay for slow fiber
 * detection), publishes the baudrate, and registers ifmedia entries
 * with autoselect as the default.
 *
 * NOTE(review): truncated chunk — error-path bodies (e.g. after the
 * recovery-mode check and the "Error getting supported media types"
 * printf) and the return statements are not visible here.
 */
350 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
352 struct ixl_vsi *vsi = &pf->vsi;
353 if_ctx_t ctx = vsi->ctx;
354 struct i40e_hw *hw = &pf->hw;
355 struct ifnet *ifp = iflib_get_ifp(ctx);
356 struct i40e_aq_get_phy_abilities_resp abilities;
357 enum i40e_status_code aq_error = 0;
359 INIT_DBG_DEV(dev, "begin");
/* Max frame = MTU + Ethernet header + CRC + VLAN tag */
361 vsi->shared->isc_max_frame_size =
362 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
363 + ETHER_VLAN_ENCAP_LEN;
365 if (IXL_PF_IN_RECOVERY_MODE(pf))
368 aq_error = i40e_aq_get_phy_capabilities(hw,
369 FALSE, TRUE, &abilities, NULL);
370 /* May need delay to detect fiber correctly */
371 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
372 i40e_msec_delay(200);
373 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
374 TRUE, &abilities, NULL);
377 if (aq_error == I40E_ERR_UNKNOWN_PHY)
378 device_printf(dev, "Unknown PHY type detected!\n");
381 "Error getting supported media types, err %d,"
382 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
384 pf->supported_speeds = abilities.link_speed;
385 if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
387 ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
391 /* Use autoselect media by default */
392 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
393 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
399 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * ixl_link_event - handle a link-state-change event from the Admin Queue.
 *
 * Re-arms firmware link status reporting via i40e_get_link_status()
 * (which also refreshes hw->phy.link_info) and warns if the link is down
 * because an unqualified module was detected.  The OS-visible link state
 * is updated elsewhere.
 *
 * NOTE(review): the 'e' event argument is not referenced in the visible
 * text — status is deliberately re-read from the hardware instead (see
 * the comment below).
 */
402 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
404 struct i40e_hw *hw = &pf->hw;
405 device_t dev = iflib_get_dev(pf->vsi.ctx);
406 struct i40e_link_status *link_info = &hw->phy.link_info;
408 /* Driver needs to re-enable delivering of link status events
409 * by FW after each event reception. Call i40e_get_link_status
410 * to do that. To not lose information about link state changes,
411 * which happened between receiving an event and the call,
412 * do not rely on status from event but use most recent
413 * status information retrieved by the call. */
414 hw->phy.get_link_info = TRUE;
415 i40e_get_link_status(hw, &pf->link_up);
417 /* Print out message if an unqualified module is found */
418 if ((link_info->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
419 (pf->advertised_speed) &&
420 (if_getflags(pf->vsi.ifp) & IFF_UP) &&
421 (!(link_info->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
422 (!(link_info->link_info & I40E_AQ_LINK_UP)))
423 device_printf(dev, "Link failed because "
424 "an unqualified module was detected!\n");
426 /* OS link info is updated elsewhere */
429 /*********************************************************************
431 * Initialize the VSI: this handles contexts, which means things
432 * like the number of descriptors, buffer size,
433 * plus we init the rings thru this function.
435 **********************************************************************/
/*
 * ixl_initialize_vsi - initialize the PF's VSI in hardware.
 *
 * Fetches and updates the VSI context via the Admin Queue (queue/TC
 * mapping, VLAN stripping mode, optional iWARP TCP enable), then
 * programs an HMC context for every TX and RX queue and (re)initializes
 * the rings.
 *
 * NOTE(review): truncated chunk — declarations of 'err', 'tc_queues' and
 * 'txctl', several error-path bodies, closing braces, and the return
 * statement are not visible here.
 */
437 ixl_initialize_vsi(struct ixl_vsi *vsi)
439 struct ixl_pf *pf = vsi->back;
440 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
441 struct ixl_tx_queue *tx_que = vsi->tx_queues;
442 struct ixl_rx_queue *rx_que = vsi->rx_queues;
443 device_t dev = iflib_get_dev(vsi->ctx);
444 struct i40e_hw *hw = vsi->hw;
445 struct i40e_vsi_context ctxt;
/* Query the current VSI parameters from firmware */
449 memset(&ctxt, 0, sizeof(ctxt));
450 ctxt.seid = vsi->seid;
451 if (pf->veb_seid != 0)
452 ctxt.uplink_seid = pf->veb_seid;
453 ctxt.pf_num = hw->pf_id;
454 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
456 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
457 " aq_error %d\n", err, hw->aq.asq_last_status);
460 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
461 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
462 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
463 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
464 ctxt.uplink_seid, ctxt.vsi_number,
465 ctxt.vsis_allocated, ctxt.vsis_unallocated,
466 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
467 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
469 ** Set the queue and traffic class bits
470 ** - when multiple traffic classes are supported
471 ** this will need to be more robust.
473 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
474 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
475 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
476 ctxt.info.queue_mapping[0] = 0;
478 * This VSI will only use traffic class 0; start traffic class 0's
479 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
480 * the driver may not use all of them).
482 tc_queues = fls(pf->qtag.num_allocated) - 1;
483 ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
484 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
485 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
486 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
488 /* Set VLAN receive stripping mode */
489 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
490 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
491 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
492 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
494 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
497 /* Set TCP Enable for iWARP capable VSI */
498 if (ixl_enable_iwarp && pf->iw_enabled) {
499 ctxt.info.valid_sections |=
500 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
501 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
504 /* Save VSI number and info for use later */
505 vsi->vsi_num = ctxt.vsi_number;
506 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
/* Push the updated context back to firmware */
508 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
510 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
512 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
513 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Program an HMC TX queue context for each TX ring */
517 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
518 struct tx_ring *txr = &tx_que->txr;
519 struct i40e_hmc_obj_txq tctx;
522 /* Setup the HMC TX Context */
523 bzero(&tctx, sizeof(tctx));
524 tctx.new_context = 1;
525 tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
526 tctx.qlen = scctx->isc_ntxd[0];
527 tctx.fc_ena = 0; /* Disable FCoE */
529 * This value needs to pulled from the VSI that this queue
530 * is assigned to. Index into array is traffic class.
532 tctx.rdylist = vsi->info.qs_handle[0];
534 * Set these to enable Head Writeback
535 * - Address is last entry in TX ring (reserved for HWB index)
536 * Leave these as 0 for Descriptor Writeback
538 if (vsi->enable_head_writeback) {
539 tctx.head_wb_ena = 1;
540 tctx.head_wb_addr = txr->tx_paddr +
541 (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
543 tctx.head_wb_ena = 0;
544 tctx.head_wb_addr = 0;
546 tctx.rdylist_act = 0;
547 err = i40e_clear_lan_tx_queue_context(hw, i);
549 device_printf(dev, "Unable to clear TX context\n");
552 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
554 device_printf(dev, "Unable to set TX context\n");
557 /* Associate the ring with this PF */
558 txctl = I40E_QTX_CTL_PF_QUEUE;
559 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
560 I40E_QTX_CTL_PF_INDX_MASK);
561 wr32(hw, I40E_QTX_CTL(i), txctl);
564 /* Do ring (re)init */
565 ixl_init_tx_ring(vsi, tx_que);
/* Program an HMC RX queue context for each RX ring */
567 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
568 struct rx_ring *rxr = &rx_que->rxr;
569 struct i40e_hmc_obj_rxq rctx;
571 /* Next setup the HMC RX Context */
572 rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
574 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
576 /* Set up an RX context for the HMC */
577 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
578 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
579 /* ignore header split for now */
580 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
/* Cap rxmax by what the buffer chain can actually hold */
581 rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
582 scctx->isc_max_frame_size : max_rxmax;
584 rctx.dsize = 1; /* do 32byte descriptors */
585 rctx.hsplit_0 = 0; /* no header split */
586 rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
587 rctx.qlen = scctx->isc_nrxd[0];
588 rctx.tphrdesc_ena = 1;
589 rctx.tphwdesc_ena = 1;
590 rctx.tphdata_ena = 0; /* Header Split related */
591 rctx.tphhead_ena = 0; /* Header Split related */
592 rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
595 rctx.showiv = 1; /* Strip inner VLAN header */
596 rctx.fc_ena = 0; /* Disable FCoE */
597 rctx.prefena = 1; /* Prefetch descriptors */
599 err = i40e_clear_lan_rx_queue_context(hw, i);
602 "Unable to clear RX context %d\n", i);
605 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
607 device_printf(dev, "Unable to set RX context %d\n", i);
/* Reset the ring's tail pointer; refill happens at ring init */
610 wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
617 ** Provide a update to the queue RX
618 ** interrupt moderation value.
/*
 * ixl_set_queue_rx_itr - adaptive RX interrupt moderation for one queue.
 *
 * When dynamic ITR is enabled, classifies the recent per-interrupt byte
 * rate into a latency band (low/average/bulk), picks a target ITR for
 * that band, and applies it with exponential smoothing capped at
 * IXL_MAX_ITR.  When dynamic ITR is off, syncs the ring's ITR with the
 * static VSI setting.
 *
 * NOTE(review): truncated chunk — declarations of rx_bytes/rx_itr/
 * rx_latency, several threshold conditions, break statements, the
 * wr32() value arguments and the byte/packet counter resets are not
 * visible here.
 */
621 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
623 struct ixl_vsi *vsi = que->vsi;
624 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
625 struct i40e_hw *hw = vsi->hw;
626 struct rx_ring *rxr = &que->rxr;
631 /* Idle, do nothing */
635 if (pf->dynamic_rx_itr) {
/* Bytes serviced per unit of throttling — load proxy */
636 rx_bytes = rxr->bytes/rxr->itr;
639 /* Adjust latency range */
640 switch (rxr->latency) {
641 case IXL_LOW_LATENCY:
643 rx_latency = IXL_AVE_LATENCY;
644 rx_itr = IXL_ITR_20K;
647 case IXL_AVE_LATENCY:
649 rx_latency = IXL_BULK_LATENCY;
651 } else if (rx_bytes <= 10) {
652 rx_latency = IXL_LOW_LATENCY;
653 rx_itr = IXL_ITR_100K;
656 case IXL_BULK_LATENCY:
657 if (rx_bytes <= 20) {
658 rx_latency = IXL_AVE_LATENCY;
659 rx_itr = IXL_ITR_20K;
664 rxr->latency = rx_latency;
666 if (rx_itr != rxr->itr) {
667 /* do an exponential smoothing */
668 rx_itr = (10 * rx_itr * rxr->itr) /
669 ((9 * rx_itr) + rxr->itr);
670 rxr->itr = min(rx_itr, IXL_MAX_ITR);
671 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
674 } else { /* We may have toggled to non-dynamic */
675 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
676 vsi->rx_itr_setting = pf->rx_itr;
677 /* Update the hardware if needed */
678 if (rxr->itr != vsi->rx_itr_setting) {
679 rxr->itr = vsi->rx_itr_setting;
680 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
690 ** Provide a update to the queue TX
691 ** interrupt moderation value.
/*
 * ixl_set_queue_tx_itr - adaptive TX interrupt moderation for one queue.
 *
 * TX twin of ixl_set_queue_rx_itr(): classifies recent per-interrupt
 * byte load into a latency band, selects a target ITR, and applies it
 * with exponential smoothing capped at IXL_MAX_ITR; falls back to the
 * static VSI setting when dynamic ITR is disabled.
 *
 * NOTE(review): truncated chunk — declarations of tx_bytes/tx_itr/
 * tx_latency, several threshold conditions, break statements, the
 * wr32() value arguments and the counter resets are not visible here.
 */
694 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
696 struct ixl_vsi *vsi = que->vsi;
697 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
698 struct i40e_hw *hw = vsi->hw;
699 struct tx_ring *txr = &que->txr;
705 /* Idle, do nothing */
709 if (pf->dynamic_tx_itr) {
/* Bytes serviced per unit of throttling — load proxy */
710 tx_bytes = txr->bytes/txr->itr;
713 switch (txr->latency) {
714 case IXL_LOW_LATENCY:
716 tx_latency = IXL_AVE_LATENCY;
717 tx_itr = IXL_ITR_20K;
720 case IXL_AVE_LATENCY:
722 tx_latency = IXL_BULK_LATENCY;
724 } else if (tx_bytes <= 10) {
725 tx_latency = IXL_LOW_LATENCY;
726 tx_itr = IXL_ITR_100K;
729 case IXL_BULK_LATENCY:
730 if (tx_bytes <= 20) {
731 tx_latency = IXL_AVE_LATENCY;
732 tx_itr = IXL_ITR_20K;
737 txr->latency = tx_latency;
739 if (tx_itr != txr->itr) {
740 /* do an exponential smoothing */
741 tx_itr = (10 * tx_itr * txr->itr) /
742 ((9 * tx_itr) + txr->itr);
743 txr->itr = min(tx_itr, IXL_MAX_ITR);
744 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
748 } else { /* We may have toggled to non-dynamic */
749 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
750 vsi->tx_itr_setting = pf->tx_itr;
751 /* Update the hardware if needed */
752 if (txr->itr != vsi->tx_itr_setting) {
753 txr->itr = vsi->tx_itr_setting;
754 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
765 * ixl_sysctl_qtx_tail_handler
766 * Retrieves I40E_QTX_TAIL value from hardware
/*
 * ixl_sysctl_qtx_tail_handler - read-only sysctl exposing a TX queue's
 * current hardware tail register value (I40E_QTX_TAIL).
 *
 * NOTE(review): truncated chunk — declarations of 'val'/'error', the
 * return statements and closing brace are not visible here.
 */
770 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
772 struct ixl_tx_queue *tx_que;
/* oid_arg1 carries the queue pointer registered with the sysctl */
776 tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
777 if (!tx_que) return 0;
779 val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
780 error = sysctl_handle_int(oidp, &val, 0, req);
781 if (error || !req->newptr)
787 * ixl_sysctl_qrx_tail_handler
788 * Retrieves I40E_QRX_TAIL value from hardware
/*
 * ixl_sysctl_qrx_tail_handler - read-only sysctl exposing an RX queue's
 * current hardware tail register value (I40E_QRX_TAIL).
 *
 * NOTE(review): truncated chunk — declarations of 'val'/'error', the
 * return statements and closing brace are not visible here.
 */
792 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
794 struct ixl_rx_queue *rx_que;
/* oid_arg1 carries the queue pointer registered with the sysctl */
798 rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
799 if (!rx_que) return 0;
801 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
802 error = sysctl_handle_int(oidp, &val, 0, req);
803 if (error || !req->newptr)
/*
 * ixl_add_hw_stats - register the PF's statistics sysctl tree.
 *
 * Adds driver-level counters (admin IRQs), creates the VSI's private
 * sysctl context and per-VSI nodes, and attaches the MAC/port statistics
 * under the device's sysctl tree.
 */
810 ixl_add_hw_stats(struct ixl_pf *pf)
812 struct ixl_vsi *vsi = &pf->vsi;
813 device_t dev = iflib_get_dev(vsi->ctx);
814 struct i40e_hw_port_stats *pf_stats = &pf->stats;
816 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
817 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
818 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
820 /* Driver statistics */
821 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
822 CTLFLAG_RD, &pf->admin_irq,
823 "Admin Queue IRQs received");
/* VSI-scoped sysctls live in the VSI's own context so they can be
 * torn down independently of the device tree */
825 sysctl_ctx_init(&vsi->sysctl_ctx);
826 ixl_vsi_add_sysctls(vsi, "pf", true);
828 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * ixl_set_rss_hlut - program the RSS hash lookup table (HLUT).
 *
 * Fills the LUT so RX queues are used round-robin (honoring the kernel
 * RSS indirection table when compiled in), then writes it either via the
 * Admin Queue (X722) or directly through the PFQF_HLUT registers.
 *
 * NOTE(review): truncated chunk — declarations of i/que_id/lut and
 * hlut_buf, the #ifdef RSS conditionals, and the store of 'lut' into
 * hlut_buf are not visible here.
 */
832 ixl_set_rss_hlut(struct ixl_pf *pf)
834 struct i40e_hw *hw = &pf->hw;
835 struct ixl_vsi *vsi = &pf->vsi;
836 device_t dev = iflib_get_dev(vsi->ctx);
840 enum i40e_status_code status;
842 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
844 /* Populate the LUT with max no. of queues in round robin fashion */
846 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
849 * Fetch the RSS bucket id for the given indirection entry.
850 * Cap it at the number of configured buckets (which is
853 que_id = rss_get_indirection_to_bucket(i);
854 que_id = que_id % vsi->num_rx_queues;
856 que_id = i % vsi->num_rx_queues;
/* Mask the queue id down to the LUT entry width */
858 lut = (que_id & ((0x1 << lut_entry_width) - 1));
/* X722 programs the LUT through the admin queue; others write
 * the PFQF_HLUT registers directly, 4 entries per 32-bit word */
862 if (hw->mac.type == I40E_MAC_X722) {
863 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
865 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
866 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
868 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
869 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
874 /* For PF VSI only */
/*
 * ixl_enable_rings - enable all TX then all RX rings of the PF VSI.
 *
 * NOTE(review): truncated chunk — the declaration of 'error', error
 * accumulation/aggregation and the return statement are not visible;
 * only the last iteration's error would survive the plain assignments
 * shown here (presumably combined with |= in the full source — confirm).
 */
876 ixl_enable_rings(struct ixl_vsi *vsi)
878 struct ixl_pf *pf = vsi->back;
881 for (int i = 0; i < vsi->num_tx_queues; i++)
882 error = ixl_enable_tx_ring(pf, &pf->qtag, i);
884 for (int i = 0; i < vsi->num_rx_queues; i++)
885 error = ixl_enable_rx_ring(pf, &pf->qtag, i);
/*
 * ixl_disable_rings - disable all TX then all RX rings of a VSI using
 * the caller-supplied queue tag.
 *
 * NOTE(review): truncated chunk — the declaration of 'error' and the
 * return statement are not visible here.
 */
891 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
895 for (int i = 0; i < vsi->num_tx_queues; i++)
896 error = ixl_disable_tx_ring(pf, qtag, i);
898 for (int i = 0; i < vsi->num_rx_queues; i++)
899 error = ixl_disable_rx_ring(pf, qtag, i);
/*
 * ixl_enable_intr - enable interrupts for a VSI.
 *
 * Under MSI-X, enables each RX queue's vector individually; the "other"
 * (admin) vector is enabled unconditionally afterwards.
 */
905 ixl_enable_intr(struct ixl_vsi *vsi)
907 struct i40e_hw *hw = vsi->hw;
908 struct ixl_rx_queue *que = vsi->rx_queues;
910 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
911 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
912 ixl_enable_queue(hw, que->rxr.me);
914 ixl_enable_intr0(hw);
/*
 * ixl_disable_rings_intr - disable the per-queue interrupt of every RX
 * queue in the VSI (the admin/"other" vector is left untouched).
 */
918 ixl_disable_rings_intr(struct ixl_vsi *vsi)
920 struct i40e_hw *hw = vsi->hw;
921 struct ixl_rx_queue *que = vsi->rx_queues;
923 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
924 ixl_disable_queue(hw, que->rxr.me);
/*
 * ixl_prepare_for_reset - quiesce the driver before a device reset.
 *
 * Stops the interface (when up), shuts down the HMC and admin queue,
 * masks the "other" interrupt, and releases the PF's queue allocation
 * so it can be re-reserved after the reset.
 *
 * NOTE(review): truncated chunk — the 'error' declaration, the is_up
 * guard around ixl_if_stop(), and the return are not visible here.
 */
928 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
930 struct i40e_hw *hw = &pf->hw;
931 device_t dev = pf->dev;
935 ixl_if_stop(pf->vsi.ctx);
937 ixl_shutdown_hmc(pf);
939 ixl_disable_intr0(hw);
941 error = i40e_shutdown_adminq(hw);
944 "Shutdown Admin queue failed with code %d\n", error);
946 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * ixl_rebuild_hw_structs_after_reset - restore driver/hardware state
 * after a device reset.
 *
 * Re-initializes the admin queue, re-reads capabilities, rebuilds the
 * HMC and switch configuration, restores flow control and PHY event
 * masks, removes firmware-reinstalled default filters, re-enables
 * broadcast RX, refreshes link and FW LLDP status, re-arms the admin
 * interrupt, and (when the interface was up) asks iflib to reset and
 * reinitialize.  On any failure jumps to the common error label, which
 * tells the operator to reload the driver.
 *
 * NOTE(review): truncated chunk — declarations (error, set_fc_err_mask),
 * many `if (error)`/`if (status)` guards, the recovery-mode early return,
 * and the final return statements are not visible here.
 */
951 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
953 struct i40e_hw *hw = &pf->hw;
954 struct ixl_vsi *vsi = &pf->vsi;
955 device_t dev = pf->dev;
956 enum i40e_get_fw_lldp_status_resp lldp_status;
959 device_printf(dev, "Rebuilding driver state...\n");
962 error = i40e_init_adminq(hw);
963 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
964 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
966 goto ixl_rebuild_hw_structs_after_reset_err;
/* In recovery mode only the admin interrupt is brought back */
969 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
970 /* Keep admin queue interrupts active while driver is loaded */
971 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
972 ixl_configure_intr0_msix(pf);
973 ixl_enable_intr0(hw);
979 i40e_clear_pxe_mode(hw);
981 error = ixl_get_hw_capabilities(pf);
983 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
984 goto ixl_rebuild_hw_structs_after_reset_err;
987 error = ixl_setup_hmc(pf);
989 goto ixl_rebuild_hw_structs_after_reset_err;
991 /* reserve a contiguous allocation for the PF's VSI */
992 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
994 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
998 error = ixl_switch_config(pf);
1000 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
1003 goto ixl_rebuild_hw_structs_after_reset_err;
/* Re-arm PHY/link event reporting mask */
1006 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
1009 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
1010 " aq_err %d\n", error, hw->aq.asq_last_status);
1012 goto ixl_rebuild_hw_structs_after_reset_err;
1016 error = i40e_set_fc(hw, &set_fc_err_mask, true);
1018 device_printf(dev, "init: setting link flow control failed; retcode %d,"
1019 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
1021 goto ixl_rebuild_hw_structs_after_reset_err;
1024 /* Remove default filters reinstalled by FW on reset */
1025 ixl_del_default_hw_filters(vsi);
1027 /* Receive broadcast Ethernet frames */
1028 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1030 /* Determine link state */
1031 if (ixl_attach_get_link_status(pf)) {
1035 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
1037 /* Query device FW LLDP status */
1038 if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
1039 if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
1040 atomic_set_32(&pf->state,
1041 IXL_PF_STATE_FW_LLDP_DISABLED);
1043 atomic_clear_32(&pf->state,
1044 IXL_PF_STATE_FW_LLDP_DISABLED);
1048 /* Keep admin queue interrupts active while driver is loaded */
1049 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1050 ixl_configure_intr0_msix(pf);
1051 ixl_enable_intr0(hw);
/* If the interface was running, have iflib reinit it */
1055 iflib_request_reset(vsi->ctx);
1056 iflib_admin_intr_deferred(vsi->ctx);
1059 device_printf(dev, "Rebuilding driver state done.\n");
/* Common error exit: nothing more we can do from here */
1062 ixl_rebuild_hw_structs_after_reset_err:
1063 device_printf(dev, "Reload the driver to recover\n");
1068 ** Set flow control using sysctl:
1075 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
1077 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1078 struct i40e_hw *hw = &pf->hw;
1079 device_t dev = pf->dev;
1080 int requested_fc, error = 0;
1081 enum i40e_status_code aq_error = 0;
1085 requested_fc = pf->fc;
1086 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
1087 if ((error) || (req->newptr == NULL))
1089 if (requested_fc < 0 || requested_fc > 3) {
1091 "Invalid fc mode; valid modes are 0 through 3\n");
1095 /* Set fc ability for port */
1096 hw->fc.requested_mode = requested_fc;
1097 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
1100 "%s: Error setting Flow Control mode %d; fc_err %#x\n",
1101 __func__, aq_error, fc_aq_err);
1104 pf->fc = requested_fc;