/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
36 #include "lio_common.h"
39 #include "lio_response_manager.h"
40 #include "lio_device.h"
43 static void lio_poll_req_completion(void *arg, int pending);
46 lio_setup_response_list(struct octeon_device *oct)
51 for (i = 0; i < LIO_MAX_RESPONSE_LISTS; i++) {
52 STAILQ_INIT(&oct->response_list[i].head);
53 mtx_init(&oct->response_list[i].lock, "response_list_lock",
55 atomic_store_rel_int(&oct->response_list[i].pending_req_count,
58 mtx_init(&oct->cmd_resp_wqlock, "cmd_resp_wqlock", NULL, MTX_DEF);
60 ctq = &oct->dma_comp_tq;
61 ctq->tq = taskqueue_create("lio_dma_comp", M_WAITOK,
62 taskqueue_thread_enqueue, &ctq->tq);
63 if (ctq->tq == NULL) {
64 lio_dev_err(oct, "failed to create wq thread\n");
68 TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
72 oct->cmd_resp_state = LIO_DRV_ONLINE;
73 taskqueue_start_threads(&ctq->tq, 1, PI_NET, "lio%d_dma_comp",
75 taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));
81 lio_delete_response_list(struct octeon_device *oct)
84 if (oct->dma_comp_tq.tq != NULL) {
85 while (taskqueue_cancel_timeout(oct->dma_comp_tq.tq,
86 &oct->dma_comp_tq.work, NULL))
87 taskqueue_drain_timeout(oct->dma_comp_tq.tq,
88 &oct->dma_comp_tq.work);
89 taskqueue_free(oct->dma_comp_tq.tq);
90 oct->dma_comp_tq.tq = NULL;
95 lio_process_ordered_list(struct octeon_device *octeon_dev,
98 struct lio_response_list *ordered_sc_list;
99 struct lio_soft_command *sc;
102 int request_complete = 0;
105 resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
107 ordered_sc_list = &octeon_dev->response_list[LIO_ORDERED_SC_LIST];
110 mtx_lock(&ordered_sc_list->lock);
112 if (STAILQ_EMPTY(&ordered_sc_list->head)) {
114 * ordered_sc_list is empty; there is nothing to
117 mtx_unlock(&ordered_sc_list->lock);
121 sc = LIO_STAILQ_FIRST_ENTRY(&ordered_sc_list->head,
122 struct lio_soft_command, node);
124 status = LIO_REQUEST_PENDING;
127 * check if octeon has finished DMA'ing a response to where
128 * rptr is pointing to
130 status64 = *sc->status_word;
132 if (status64 != COMPLETION_WORD_INIT) {
134 * This logic ensures that all 64b have been written.
135 * 1. check byte 0 for non-FF
136 * 2. if non-FF, then swap result from BE to host order
137 * 3. check byte 7 (swapped to 0) for non-FF
138 * 4. if non-FF, use the low 32-bit status code
139 * 5. if either byte 0 or byte 7 is FF, don't use status
141 if ((status64 & 0xff) != 0xff) {
142 lio_swap_8B_data(&status64, 1);
143 if (((status64 & 0xff) != 0xff)) {
144 /* retrieve 16-bit firmware status */
145 status = (uint32_t)(status64 &
148 status = LIO_FW_STATUS_CODE(
152 status = LIO_REQUEST_DONE;
156 } else if (force_quit || (sc->timeout &&
157 lio_check_timeout(ticks, sc->timeout))) {
158 lio_dev_err(octeon_dev, "%s: cmd failed, timeout (%u, %u)\n",
159 __func__, ticks, sc->timeout);
160 status = LIO_REQUEST_TIMEOUT;
163 if (status != LIO_REQUEST_PENDING) {
164 /* we have received a response or we have timed out */
165 /* remove node from linked list */
166 STAILQ_REMOVE(&octeon_dev->response_list
167 [LIO_ORDERED_SC_LIST].head,
168 &sc->node, lio_stailq_node, entries);
169 atomic_subtract_int(&octeon_dev->response_list
170 [LIO_ORDERED_SC_LIST].
171 pending_req_count, 1);
172 mtx_unlock(&ordered_sc_list->lock);
174 if (sc->callback != NULL)
175 sc->callback(octeon_dev, status,
181 /* no response yet */
182 request_complete = 0;
183 mtx_unlock(&ordered_sc_list->lock);
187 * If we hit the Max Ordered requests to process every loop,
188 * we quit and let this function be invoked the next time
189 * the poll thread runs to process the remaining requests.
190 * This function can take up the entire CPU if there is no
191 * upper limit to the requests processed.
193 if (request_complete >= resp_to_process)
195 } while (request_complete);
201 lio_poll_req_completion(void *arg, int pending)
203 struct lio_tq *ctq = (struct lio_tq *)arg;
204 struct octeon_device *oct = (struct octeon_device *)ctq->ctxptr;
206 lio_process_ordered_list(oct, 0);
207 taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));