/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_rxtx.h"

struct lio_iq_post_status {
	int	status;
	int	index;
};

static void	lio_check_db_timeout(void *arg, int pending);
static void	__lio_check_db_timeout(struct octeon_device *oct,
				       uint64_t iq_no);

/* Return 0 on success, 1 on failure */
static int
lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
		     uint32_t num_descs)
{
	struct lio_instr_queue *iq;
	struct lio_iq_config *conf = NULL;
	struct lio_tq *db_tq;
	struct lio_request_list *request_buf;
	bus_size_t max_size;
	uint32_t iq_no = (uint32_t)txpciq.s.q_no;
	uint32_t q_size;
	int error, i;

	if (LIO_CN23XX_PF(oct))
		conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)));

	if (conf == NULL) {
		lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id);
		return (1);
	}

	q_size = (uint32_t)conf->instr_type * num_descs;
	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs;
	error = bus_dma_tag_create(bus_get_dma_tag(oct->device),	/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   max_size,		/* maxsize */
				   LIO_MAX_SG,		/* nsegments */
				   PAGE_SIZE,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &iq->txtag);
	if (error) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}
	iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}

	iq->max_count = num_descs;
	/*
	 * Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by it.
	 */
	iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
				  M_DEVBUF, M_NOWAIT | M_ZERO);
	if (iq->request_list == NULL) {
		lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n",
			    (int)iq_no);
		return (1);
	}
	lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		    iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma),
		    iq->max_count);
	/* Create the descriptor buffer DMA maps */
	request_buf = iq->request_list;
	for (i = 0; i < num_descs; i++, request_buf++) {
		error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
		if (error) {
			lio_dev_err(oct, "Unable to create TX DMA map\n");
			return (1);
		}
	}
	iq->txpciq.txpciq64 = txpciq.txpciq64;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->db_timeout = (uint32_t)conf->db_timeout;
	atomic_store_rel_int(&iq->instr_pending, 0);
	/* Initialize the locks for this instruction queue */
	mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
	mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
	mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);
	mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
		 MTX_DEF);
	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);
	db_tq = &oct->check_db_tq[iq_no];
	db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
				     taskqueue_thread_enqueue, &db_tq->tq);
	if (db_tq->tq == NULL) {
		lio_dev_err(oct, "check db wq create failed for iq %d\n",
			    iq_no);
		return (1);
	}

	TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
			  (void *)db_tq);
	db_tq->ctxul = iq_no;
	db_tq->ctxptr = oct;

	taskqueue_start_threads(&db_tq->tq, 1, PI_NET,
				"lio%d_check_db_timeout:%d",
				oct->octeon_id, iq_no);

	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);
	/* Allocate a buf ring */
	oct->instr_queue[iq_no]->br =
	    buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
			   &oct->instr_queue[iq_no]->enq_lock);
	if (oct->instr_queue[iq_no]->br == NULL) {
		lio_dev_err(oct, "Critical Failure setting up buf ring\n");
		return (1);
	}

	return (0);
}
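
/*
 * Sizing note (illustrative, not from the hardware manual): with the 64B
 * instruction format and a 512-entry ring, the descriptor ring above works
 * out to
 *
 *	q_size   = conf->instr_type * num_descs = 64 * 512 = 32 KiB
 *	max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs
 *
 * so the DMA tag is created to cover the worst case of every descriptor
 * pointing at a maximum-sized frame.
 */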

int
lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue *iq = oct->instr_queue[iq_no];
	struct lio_request_list *request_buf;
	struct lio_mbuf_free_info *finfo;
	uint64_t desc_size = 0, q_size;
	int i;
	lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no);

	if (oct->check_db_tq[iq_no].tq != NULL) {
		while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work,
						NULL))
			taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work);
		taskqueue_free(oct->check_db_tq[iq_no].tq);
		oct->check_db_tq[iq_no].tq = NULL;
	}
	if (LIO_CN23XX_PF(oct))
		desc_size =
		    LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));
	request_buf = iq->request_list;
	for (i = 0; i < iq->max_count; i++, request_buf++) {
		if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) ||
		    (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) {
			if (request_buf->buf != NULL) {
				finfo = request_buf->buf;
				bus_dmamap_sync(iq->txtag, request_buf->map,
						BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(iq->txtag,
						  request_buf->map);
				m_freem(finfo->mb);
				request_buf->buf = NULL;
				if (request_buf->map != NULL) {
					bus_dmamap_destroy(iq->txtag,
							   request_buf->map);
					request_buf->map = NULL;
				}
			} else if (request_buf->map != NULL) {
				bus_dmamap_unload(iq->txtag, request_buf->map);
				bus_dmamap_destroy(iq->txtag, request_buf->map);
				request_buf->map = NULL;
			}
		}
	}
	if (iq->br != NULL) {
		buf_ring_free(iq->br, M_DEVBUF);
		iq->br = NULL;
	}

	if (iq->request_list != NULL) {
		free(iq->request_list, M_DEVBUF);
		iq->request_list = NULL;
	}

	if (iq->txtag != NULL) {
		bus_dma_tag_destroy(iq->txtag);
		iq->txtag = NULL;
	}
	if (iq->base_addr != NULL) {
		q_size = iq->max_count * desc_size;
		lio_dma_free((uint32_t)q_size, iq->base_addr);

		oct->io_qmask.iq &= ~(1ULL << iq_no);
		bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue));
		oct->num_iqs--;

		return (0);
	}

	return (1);
}

/* Return 0 on success, 1 on failure */
int
lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index,
	     union octeon_txpciq txpciq, uint32_t num_descs)
{
	uint32_t iq_no = (uint32_t)txpciq.s.q_no;

	if (oct->instr_queue[iq_no]->oct_dev != NULL) {
		lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n",
			    iq_no);
		oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
		return (0);
	}

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (lio_init_instr_queue(oct, txpciq, num_descs)) {
		lio_delete_instr_queue(oct, iq_no);
		return (1);
	}

	oct->num_iqs++;

	if (oct->fn_list.enable_io_queues(oct))
		return (1);

	return (0);
}

int
lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int	i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending = atomic_load_acq_int(
					&oct->instr_queue[i]->instr_pending);
			if (pending)
				__lio_check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		lio_sleep_timeout(1);

	} while (retry-- && instr_cnt);

	return (instr_cnt);
}

void
lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
{

	if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) {
		lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
		/* make sure doorbell write goes through */
		mb();
		iq->fill_cnt = 0;
		iq->last_db_time = ticks;
	}
}
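
/*
 * Note on fill_cnt: posts accumulate in iq->fill_cnt (see
 * __lio_post_command2()), and the doorbell write above hands that count to
 * the hardware in one shot before resetting it. Batching posts between
 * doorbell rings is what lets lio_send_command() skip the comparatively
 * expensive CSR write unless force_db is set or the queue is nearly full.
 */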

static inline void
__lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
	uint8_t	*iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline struct lio_iq_post_status
__lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
	struct lio_iq_post_status st;

	st.status = LIO_IQ_SEND_OK;

	/*
	 * This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 1)) {
		st.status = LIO_IQ_SEND_FAILED;
		st.index = -1;
		return (st);
	}

	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 2))
		st.status = LIO_IQ_SEND_STOP;

	__lio_copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
					      iq->max_count);
	iq->fill_cnt++;

	/*
	 * Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_add_int(&iq->instr_pending, 1);

	return (st);
}
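
/*
 * Worked example (illustrative): with max_count = 4 and host_write_index = 3,
 * posting one command returns index 3 and wraps host_write_index to 0 via
 * lio_incr_index(3, 1, 4). The instr_pending checks above deliberately leave
 * one slot unused (LIO_IQ_SEND_FAILED at max_count - 1), so a full ring can
 * never make the hardware read index equal the host write index, which would
 * be indistinguishable from an empty ring.
 */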

static void
__lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
			  int reqtype)
{

	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct lio_instr_queue *iq, uint32_t budget)
{
	struct lio_soft_command *sc;
	struct octeon_instr_irh *irh = NULL;
	struct lio_mbuf_free_info *finfo;
	void	*buf;
	uint32_t inst_count = 0;
	uint32_t old = iq->flush_index;
	int	reqtype;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;
		finfo = buf;

		if (reqtype == LIO_REQTYPE_NONE)
			goto skip_this;

		switch (reqtype) {
		case LIO_REQTYPE_NORESP_NET:
			lio_free_mbuf(iq, buf);
			break;
		case LIO_REQTYPE_NORESP_NET_SG:
			lio_free_sgmbuf(iq, buf);
			break;
		case LIO_REQTYPE_RESP_NET:
		case LIO_REQTYPE_SOFT_COMMAND:
			sc = buf;
			if (LIO_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			if (irh->rflag) {
				/*
				 * We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				mtx_lock(&oct->response_list
					 [LIO_ORDERED_SC_LIST].lock);
				atomic_add_int(&oct->response_list
					       [LIO_ORDERED_SC_LIST].
					       pending_req_count, 1);
				STAILQ_INSERT_TAIL(&oct->response_list
						   [LIO_ORDERED_SC_LIST].
						   head, &sc->node, entries);
				mtx_unlock(&oct->response_list
					   [LIO_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback != NULL) {
					/* This callback must not sleep */
					sc->callback(oct, LIO_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n",
				    __func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = lio_incr_index(old, 1, iq->max_count);

		if ((budget) && (inst_count >= budget))
			break;
	}

	iq->flush_index = old;

	return (inst_count);
}

/* Can only be called from process context */
int
lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
	     uint32_t budget)
{
	uint32_t inst_processed = 0;
	uint32_t tot_inst_processed = 0;
	int	tx_done = 1;

	if (!mtx_trylock(&iq->iq_flush_running_lock))
		return (tx_done);

	mtx_lock(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (budget)
			inst_processed =
			    lio_process_iq_request_list(oct, iq,
							budget -
							tot_inst_processed);
		else
			inst_processed =
			    lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_subtract_int(&iq->instr_pending,
					    inst_processed);
			iq->stats.instr_processed += inst_processed;
		}
		tot_inst_processed += inst_processed;
		inst_processed = 0;

	} while (tot_inst_processed < budget);

	if (budget && (tot_inst_processed >= budget))
		tx_done = 0;

	iq->last_db_time = ticks;

	mtx_unlock(&iq->lock);

	mtx_unlock(&iq->iq_flush_running_lock);

	return (tx_done);
}
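
/*
 * Budget semantics, as implemented above: a budget of 0 means "drain what is
 * there" -- the do/while condition (tot_inst_processed < 0) fails after the
 * first pass and tx_done stays 1. A nonzero budget caps how many completions
 * are reclaimed per call; tx_done == 0 tells the caller the queue still has
 * work pending and should be polled again.
 */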

/*
 * Process instruction queue after timeout.
 * This routine gets called from a taskqueue or when removing the module.
 */
static void
__lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no)
{
	struct lio_instr_queue *iq;
	uint64_t next_time;

	if (oct == NULL)
		return;

	iq = oct->instr_queue[iq_no];
	if (iq == NULL)
		return;

	if (atomic_load_acq_int(&iq->instr_pending)) {
		/* If ticks - last_db_time < db_timeout do nothing */
		next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
		if (!lio_check_timeout(ticks, next_time))
			return;

		iq->last_db_time = ticks;

		/* Flush the instruction queue */
		lio_flush_iq(oct, iq, 0);

		lio_enable_irq(NULL, iq);
	}

	if (oct->props.ifp != NULL && iq->br != NULL) {
		if (mtx_trylock(&iq->enq_lock)) {
			if (!drbr_empty(oct->props.ifp, iq->br))
				lio_mq_start_locked(oct->props.ifp, iq);

			mtx_unlock(&iq->enq_lock);
		}
	}
}
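
/*
 * Two jobs are handled above: reclaiming descriptors for commands Octeon has
 * already fetched (lio_flush_iq()), and restarting a transmit path that
 * stalled while the ring was full -- if the buf ring still holds mbufs, they
 * are pushed out now under enq_lock. db_timeout only rate-limits the first
 * job; the drbr check runs on every invocation.
 */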

/*
 * Called by the poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by
 * Octeon.
 */
static void
lio_check_db_timeout(void *arg, int pending)
{
	struct lio_tq *db_tq = (struct lio_tq *)arg;
	struct octeon_device *oct = db_tq->ctxptr;
	uint64_t iq_no = db_tq->ctxul;
	uint32_t delay = 10;	/* re-arm interval in ms */

	__lio_check_db_timeout(oct, iq_no);
	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
				  lio_ms_to_ticks(delay));
}

int
lio_send_command(struct octeon_device *oct, uint32_t iq_no,
		 uint32_t force_db, void *cmd, void *buf,
		 uint32_t datasize, uint32_t reqtype)
{
	struct lio_iq_post_status st;
	struct lio_instr_queue *iq = oct->instr_queue[iq_no];

	/*
	 * Get the lock and prevent other tasks and tx interrupt handler
	 * from running.
	 */
	mtx_lock(&iq->post_lock);

	st = __lio_post_command2(iq, cmd);

	if (st.status != LIO_IQ_SEND_FAILED) {
		__lio_add_to_request_list(iq, st.index, buf, reqtype);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db || (st.status == LIO_IQ_SEND_STOP))
			lio_ring_doorbell(oct, iq);
	} else {
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	mtx_unlock(&iq->post_lock);

	/*
	 * This is only done here to expedite packets being flushed for
	 * cases where there are no IQ completion interrupts.
	 */

	return (st.status);
}
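
/*
 * Typical call pattern (sketch, not taken verbatim from the driver): the
 * soft-command path below posts with force_db = 1, while a hot Tx path
 * posts with force_db = 0 and lets batching/LIO_IQ_SEND_STOP decide when
 * to ring the doorbell:
 *
 *	st = lio_send_command(oct, iq_no, 0, &cmd, finfo, datasize,
 *			      LIO_REQTYPE_NORESP_NET);
 *	if (st == LIO_IQ_SEND_FAILED)
 *		... drop or requeue the mbuf ...
 */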

void
lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc,
			 uint8_t opcode, uint8_t subcode, uint32_t irh_ossp,
			 uint64_t ossp0, uint64_t ossp1)
{
	struct lio_config *lio_cfg;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__));
	KASSERT(subcode <= 127, ("%s, %d, subcode > 127", __func__, __LINE__));

	lio_cfg = lio_get_conf(oct);

	if (LIO_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;

		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = LIO_ATOMIC_TAG;
		pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;

		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}
	}
}
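
/*
 * For reference (derived from the fsz comments above): ih3->fsz counts the
 * "front data" carried inside the instruction itself -- pki_ih3, irh,
 * ossp[0] and ossp[1] (32 bytes), plus rdp and rptr (48 bytes total) when a
 * response is expected. The bulk data and the response buffer live in the
 * DMA regions that lio_send_soft_command() wires up via dptr/rptr.
 */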

int
lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc)
{
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	uint32_t len = 0;

	if (LIO_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL",
					      __func__, __LINE__));
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL",
					      __func__, __LINE__));
			KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL",
						  __func__, __LINE__));
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}

		len = (uint32_t)ih3->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time);

	return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				 len, LIO_REQTYPE_SOFT_COMMAND));
}

int
lio_setup_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_soft_command *sc;
	uint64_t dma_addr;
	int i;

	STAILQ_INIT(&oct->sc_buf_pool.head);
	mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF);
	atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct lio_soft_command *)
		    lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE,
				  (vm_paddr_t *)&dma_addr);
		if (sc == NULL) {
			lio_free_sc_buffer_pool(oct);
			return (1);
		}

		sc->dma_addr = dma_addr;
		sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;

		STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
	}

	return (0);
}

int
lio_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_stailq_node *tmp, *tmp2;
	struct lio_soft_command *sc;

	mtx_lock(&oct->sc_buf_pool.lock);

	STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) {
		sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head,
					    struct lio_soft_command, node);

		STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries);

		lio_dma_free(sc->size, sc);
	}

	STAILQ_INIT(&oct->sc_buf_pool.head);

	mtx_unlock(&oct->sc_buf_pool.lock);

	return (0);
}
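
/*
 * Allocation note: lio_alloc_soft_command() below takes buffers from the
 * tail of the list (STAILQ_LAST) and lio_free_soft_command() returns them
 * to the tail, so the pool is reused LIFO -- the most recently freed (and
 * likely still cache-warm) buffer is handed out first.
 */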

struct lio_soft_command *
lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize,
		       uint32_t rdatasize, uint32_t ctxsize)
{
	struct lio_soft_command *sc = NULL;
	struct lio_stailq_node *tmp;
	uint64_t dma_addr;
	uint32_t size;
	uint32_t offset = sizeof(struct lio_soft_command);

	KASSERT((offset + datasize + rdatasize + ctxsize) <=
		LIO_SOFT_COMMAND_BUFFER_SIZE,
		("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE",
		 __func__, __LINE__));

	mtx_lock(&oct->sc_buf_pool.lock);

	if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) {
		mtx_unlock(&oct->sc_buf_pool.lock);
		return (NULL);
	}

	tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries);

	STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries);

	atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1);

	mtx_unlock(&oct->sc_buf_pool.lock);

	sc = (struct lio_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	bzero(sc, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (uint8_t *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (uint8_t *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__,
					  __LINE__));
		sc->virtrptr = (uint8_t *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
					       rdatasize - 8);
	}

	return (sc);
}
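
/*
 * Resulting layout of a pool buffer (sketch, following the offset math
 * above):
 *
 *	[ struct lio_soft_command ][ ctx ]..pad..[ data ]..pad..[ rdata ]
 *	                                  ^128-aligned   ^128-aligned
 *
 * The (x + 127) & 0xffffff80 step rounds an offset up to the next multiple
 * of 128, e.g. 200 -> 256. status_word aliases the last 8 bytes of rdata,
 * hence the rdatasize >= 16 check above.
 */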

void
lio_free_soft_command(struct octeon_device *oct,
		      struct lio_soft_command *sc)
{

	mtx_lock(&oct->sc_buf_pool.lock);

	STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);

	atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1);

	mtx_unlock(&oct->sc_buf_pool.lock);
}