1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
38 #include "ixl_iw_int.h"
/*
 * The iWARP client is handed the tail of the PF's MSI-X range:
 * vectors [msix - iw_msix, msix).  BASE is the first iWARP vector,
 * COUNT the number reserved for iWARP, LIMIT one past the last.
 */
42 #define IXL_IW_VEC_BASE(pf) ((pf)->msix - (pf)->iw_msix)
43 #define IXL_IW_VEC_COUNT(pf) ((pf)->iw_msix)
44 #define IXL_IW_VEC_LIMIT(pf) ((pf)->msix)
/* Driver-wide tunable (defined elsewhere); non-zero enables iwarp support. */
46 extern int ixl_enable_iwarp;
/* Global state shared by all PFs (pf list, taskqueue, client ops);
 * mutable fields are accessed under ixl_iw.mtx. */
48 static struct ixl_iw_state ixl_iw;
/* Number of iwarp-capable PFs currently attached.
 * NOTE(review): updated in lines not visible in this chunk -- verify. */
49 static int ixl_iw_ref_cnt;
/*
 * ixl_iw_pf_msix_reset - detach all queues from every MSI-X vector reserved
 * for iwarp on this PF by writing the "end of list" value into each vector's
 * interrupt linked-list head register.
 * NOTE(review): LNKLSTN register indices start at vector 1 (vector 0 is the
 * admin/other-cause vector), hence the (vec - 1) -- confirm vs. datasheet.
 */
52 ixl_iw_pf_msix_reset(struct ixl_pf *pf)
54 struct i40e_hw *hw = &pf->hw;
58 for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
/* FIRSTQ_INDX all-ones == no queue attached to this vector. */
59 reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
60 wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
/*
 * ixl_iw_invoke_op - taskqueue handler that reconciles a PF's scheduled
 * iwarp state (iw_scheduled) with its actual state (iw_current) by calling
 * the registered client's init or stop callback.
 * The decision is made under ixl_iw.mtx, but the callback itself runs on a
 * private snapshot of pf_info with the mutex dropped.
 */
67 ixl_iw_invoke_op(void *context, int pending)
69 struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context;
70 struct ixl_iw_pf info;
74 INIT_DEBUGOUT("begin");
76 mtx_lock(&ixl_iw.mtx);
/* ON requested while currently OFF -> bring iwarp up. */
77 if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) &&
78 (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF))
/* OFF requested while currently ON -> tear iwarp down. */
80 else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) &&
81 (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON))
84 /* nothing to be done, so finish here */
85 mtx_unlock(&ixl_iw.mtx);
/* Snapshot PF info so the client op runs without holding the mutex. */
88 info = pf_entry->pf_info;
89 mtx_unlock(&ixl_iw.mtx);
92 err = ixl_iw.ops->init(&info);
94 device_printf(pf_entry->pf->dev,
95 "%s: failed to initialize iwarp (err %d)\n",
/* Only mark ON after a successful client init. */
98 pf_entry->state.iw_current = IXL_IW_PF_STATE_ON;
100 err = ixl_iw.ops->stop(&info);
102 device_printf(pf_entry->pf->dev,
103 "%s: failed to stop iwarp (err %d)\n",
/* Restore default MSI-X state now that the client released its vectors. */
106 ixl_iw_pf_msix_reset(pf_entry->pf);
/* NOTE(review): iw_current is set OFF even after a failed stop --
 * appears deliberate (best-effort teardown); confirm against full file. */
107 pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
/* NOTE(review): fragment of what appears to be module teardown -- destroys
 * the global pf-list mutex; the function signature is not visible here. */
116 INIT_DEBUGOUT("begin");
118 mtx_destroy(&ixl_iw.mtx);
/* NOTE(review): fragment of what appears to be module initialization --
 * sets up the empty PF list, the global mutex, and the "no client
 * registered yet" state; the function signature is not visible here. */
126 INIT_DEBUGOUT("begin");
128 LIST_INIT(&ixl_iw.pfs);
129 mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF);
130 ixl_iw.registered = false;
135 /******************************************************************************
136 * if_ixl internal API
137 *****************************************************************************/
/*
 * ixl_iw_pf_init - publish an attached PF's resources (device, BAR, MSI-X
 * range, queue-set handles) to the iwarp layer and, if a client driver is
 * already registered, schedule the client init via the taskqueue.
 * The PF must previously have been added by ixl_iw_pf_attach().
 */
140 ixl_iw_pf_init(struct ixl_pf *pf)
142 struct ixl_iw_pf_entry *pf_entry;
143 struct ixl_iw_pf *pf_info;
146 INIT_DEBUGOUT("begin");
148 mtx_lock(&ixl_iw.mtx);
/* Locate this PF's entry on the global list. */
150 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
151 if (pf_entry->pf == pf)
153 if (pf_entry == NULL) {
154 /* attempt to initialize PF not yet attached - sth is wrong */
155 device_printf(pf->dev, "%s: PF not found\n", __func__);
/* Fill the info block that will be handed to the iwarp client. */
160 pf_info = &pf_entry->pf_info;
162 pf_info->handle = (void *)pf;
164 pf_info->ifp = pf->vsi.ifp;
165 pf_info->dev = pf->dev;
166 pf_info->pci_mem = pf->pci_mem;
167 pf_info->pf_id = pf->hw.pf_id;
168 pf_info->mtu = pf->vsi.ifp->if_mtu;
/* Tail of the PF's MSI-X range is reserved for iwarp. */
170 pf_info->iw_msix.count = IXL_IW_VEC_COUNT(pf);
171 pf_info->iw_msix.base = IXL_IW_VEC_BASE(pf);
/* Every user priority maps to qs_handle[0].
 * NOTE(review): looks intentional (single TC); confirm. */
173 for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
174 pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]);
176 pf_entry->state.pf = IXL_IW_PF_STATE_ON;
/* Client already registered: ask the task to bring iwarp up on this PF. */
177 if (ixl_iw.registered) {
178 pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
179 taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
183 mtx_unlock(&ixl_iw.mtx);
/*
 * ixl_iw_pf_stop - mark a PF as stopped and, if iwarp was scheduled on it,
 * schedule the asynchronous client teardown via the taskqueue.
 */
189 ixl_iw_pf_stop(struct ixl_pf *pf)
191 struct ixl_iw_pf_entry *pf_entry;
193 INIT_DEBUGOUT("begin");
195 mtx_lock(&ixl_iw.mtx);
/* Locate this PF's entry on the global list. */
197 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
198 if (pf_entry->pf == pf)
200 if (pf_entry == NULL) {
201 /* attempt to stop PF which has not been attached - sth is wrong */
202 device_printf(pf->dev, "%s: PF not found\n", __func__);
206 pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
/* iwarp was scheduled ON: flip to OFF and let the task perform the stop. */
207 if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
208 pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
209 if (ixl_iw.registered)
210 taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
214 mtx_unlock(&ixl_iw.mtx);
/*
 * ixl_iw_pf_attach - create the bookkeeping entry for a newly attached PF
 * and insert it onto the global PF list with all states OFF.  Also sets up
 * the per-PF task that performs client init/stop asynchronously.
 * NOTE(review): the ixl_iw_ref_cnt == 0 branch body is not visible in this
 * chunk -- presumably first-attach module initialization; verify.
 */
220 ixl_iw_pf_attach(struct ixl_pf *pf)
222 struct ixl_iw_pf_entry *pf_entry;
225 INIT_DEBUGOUT("begin");
227 if (ixl_iw_ref_cnt == 0)
230 mtx_lock(&ixl_iw.mtx);
/* Reject double attach of the same PF. */
232 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
233 if (pf_entry->pf == pf) {
234 device_printf(pf->dev, "%s: PF already exists\n",
/* M_NOWAIT: we are holding a mutex, so we must not sleep here. */
240 pf_entry = malloc(sizeof(struct ixl_iw_pf_entry),
241 M_DEVBUF, M_NOWAIT | M_ZERO);
242 if (pf_entry == NULL) {
243 device_printf(pf->dev,
244 "%s: failed to allocate memory to attach new PF\n",
/* New PF starts with everything OFF until ixl_iw_pf_init() runs. */
250 pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
251 pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
252 pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
254 LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node);
/* Per-PF task: runs ixl_iw_invoke_op with this entry as context. */
257 TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry);
259 mtx_unlock(&ixl_iw.mtx);
/*
 * ixl_iw_pf_detach - remove a previously attached PF from the global list
 * and free its entry.  The PF must already be stopped (state.pf == OFF);
 * detaching an active PF is refused.
 * NOTE(review): the ixl_iw_ref_cnt == 0 branch body is not visible in this
 * chunk -- presumably last-detach module teardown; verify.
 */
265 ixl_iw_pf_detach(struct ixl_pf *pf)
267 struct ixl_iw_pf_entry *pf_entry;
270 INIT_DEBUGOUT("begin");
272 mtx_lock(&ixl_iw.mtx);
/* Locate this PF's entry on the global list. */
274 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
275 if (pf_entry->pf == pf)
277 if (pf_entry == NULL) {
278 /* attempt to stop PF which has not been attached - sth is wrong */
279 device_printf(pf->dev, "%s: PF not found\n", __func__);
284 if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) {
285 /* attempt to detach PF which has not yet been stopped - sth is wrong */
286 device_printf(pf->dev, "%s: failed - PF is still active\n",
291 LIST_REMOVE(pf_entry, node);
292 free(pf_entry, M_DEVBUF);
296 mtx_unlock(&ixl_iw.mtx);
298 if (ixl_iw_ref_cnt == 0)
305 /******************************************************************************
306 * API exposed to iw_ixl module
307 *****************************************************************************/
/*
 * ixl_iw_pf_reset - client-facing entry point to reset the PF identified by
 * the opaque handle (an ixl_pf pointer handed out in pf_info->handle).
 * NOTE(review): the body of this function is not visible in this chunk.
 */
310 ixl_iw_pf_reset(void *pf_handle)
312 struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
314 INIT_DEBUGOUT("begin");
/*
 * ixl_iw_pf_msix_init - program the PF's interrupt cause registers from the
 * client-supplied MSI-X mapping: bind the AEQ to its vector and attach each
 * CEQ to the vector the client chose; vectors with no CEQ are left detached.
 * Validates that the AEQ vector lies inside the iwarp-reserved range.
 */
324 ixl_iw_pf_msix_init(void *pf_handle,
325 struct ixl_iw_msix_mapping *msix_info)
327 struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
328 struct i40e_hw *hw = &pf->hw;
332 INIT_DEBUGOUT("begin");
/* AEQ vector must fall inside [IW_VEC_BASE, IW_VEC_LIMIT). */
334 if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) ||
335 (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) {
336 printf("%s: invalid MSIX vector (%i) for AEQ\n",
337 __func__, msix_info->aeq_vector);
/* Enable the AEQ cause and route it to the requested vector/ITR index. */
340 reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
341 (msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
342 (msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT);
343 wr32(hw, I40E_PFINT_AEQCTL, reg);
345 for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
/* Find the CEQ (if any) that the client mapped onto this vector. */
346 for (i = 0; i < msix_info->ceq_cnt; i++)
347 if (msix_info->ceq_vector[i] == vec)
349 if (i == msix_info->ceq_cnt) {
350 /* this vector has no CEQ mapped */
351 reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
352 wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
/* Head of this vector's interrupt list: CEQ i, of PE-CEQ queue type.
 * NOTE(review): (vec - 1) converts vector number to LNKLSTN register
 * index (vector 0 has no LNKLSTN entry) -- confirm vs. datasheet. */
354 reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
355 (I40E_QUEUE_TYPE_PE_CEQ <<
356 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
357 wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
/* Enable the CEQ cause, route to this vector, terminate the list. */
359 reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
360 (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
361 (msix_info->itr_indx <<
362 I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
364 I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);
365 wr32(hw, I40E_PFINT_CEQCTL(i), reg);
/*
 * ixl_iw_register - entry point for the iw_ixl client module.  Validates
 * that iwarp is enabled and supported, records the client's init/stop ops,
 * creates the service taskqueue, and schedules client init on every PF that
 * is already up.  Only one client may be registered at a time.
 */
373 ixl_iw_register(struct ixl_iw_ops *ops)
375 struct ixl_iw_pf_entry *pf_entry;
377 int iwarp_cap_on_pfs = 0;
379 INIT_DEBUGOUT("begin");
/* Refuse registration when no attached PF reports iwarp capability. */
380 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
381 iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
382 if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
383 printf("%s: the device is not iwarp-capable, registering dropped\n",
387 if (ixl_enable_iwarp == 0) {
388 printf("%s: enable_iwarp is off, registering dropped\n",
/* Both callbacks are mandatory. */
393 if ((ops->init == NULL) || (ops->stop == NULL)) {
394 printf("%s: invalid iwarp driver ops\n", __func__);
398 mtx_lock(&ixl_iw.mtx);
399 if (ixl_iw.registered) {
400 printf("%s: iwarp driver already registered\n", __func__);
/* Claim the registration slot before dropping the lock. */
404 ixl_iw.registered = true;
405 mtx_unlock(&ixl_iw.mtx);
/* Single-threaded taskqueue serializes all per-PF init/stop work. */
407 ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
408 taskqueue_thread_enqueue, &ixl_iw.tq);
409 if (ixl_iw.tq == NULL) {
410 printf("%s: failed to create queue\n", __func__);
411 ixl_iw.registered = false;
414 taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");
/* Keep a private copy of the client's ops table. */
416 ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops),
417 M_DEVBUF, M_NOWAIT | M_ZERO);
418 if (ixl_iw.ops == NULL) {
419 printf("%s: failed to allocate memory\n", __func__);
420 taskqueue_free(ixl_iw.tq);
421 ixl_iw.registered = false;
425 ixl_iw.ops->init = ops->init;
426 ixl_iw.ops->stop = ops->stop;
/* Kick off client init on every PF that is already running. */
428 mtx_lock(&ixl_iw.mtx);
429 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
430 if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
431 pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
432 taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
435 mtx_unlock(&ixl_iw.mtx);
/*
 * ixl_iw_unregister - undo ixl_iw_register: schedule client stop on every
 * PF where iwarp is up, drain the per-PF tasks so all stops complete, then
 * release the taskqueue and the copied ops table.
 */
441 ixl_iw_unregister(void)
443 struct ixl_iw_pf_entry *pf_entry;
444 int iwarp_cap_on_pfs = 0;
446 INIT_DEBUGOUT("begin");
/* Mirror the capability/enable checks performed at registration time. */
448 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
449 iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
450 if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
451 printf("%s: attempt to unregister driver when no iwarp-capable device present\n",
456 if (ixl_enable_iwarp == 0) {
457 printf("%s: attempt to unregister driver when enable_iwarp is off\n",
461 mtx_lock(&ixl_iw.mtx);
463 if (!ixl_iw.registered) {
464 printf("%s: failed - iwarp driver has not been registered\n",
466 mtx_unlock(&ixl_iw.mtx);
/* Flip every scheduled-ON PF to OFF; the task performs the stop. */
470 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
471 if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
472 pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
473 taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
476 ixl_iw.registered = false;
478 mtx_unlock(&ixl_iw.mtx);
/* Wait for all outstanding stop tasks before tearing the queue down. */
480 LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
481 taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task);
482 taskqueue_free(ixl_iw.tq);
484 free(ixl_iw.ops, M_DEVBUF);