/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_rxtx.h"

struct lio_iq_post_status {
        int     status;
        int     index;
};

static void     lio_check_db_timeout(void *arg, int pending);
static void     __lio_check_db_timeout(struct octeon_device *oct,
                                       uint64_t iq_no);

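/*
 * Set up one instruction (Tx) queue: allocate the descriptor ring and the
 * per-descriptor DMA maps, initialize the queue locks and bookkeeping, and
 * create the doorbell-timeout taskqueue and the per-queue buf ring.
 */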
/* Return 0 on success, 1 on failure */
int
lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
                     uint32_t num_descs)
{
        struct lio_instr_queue  *iq;
        struct lio_iq_config    *conf = NULL;
        struct lio_tq           *db_tq;
        struct lio_request_list *request_buf;
        bus_size_t              max_size;
        uint32_t                iq_no = (uint32_t)txpciq.s.q_no;
        uint32_t                q_size;
        int                     error, i;

        if (LIO_CN23XX_PF(oct))
                conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)));
        if (conf == NULL) {
                lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id);
                return (1);
        }

        q_size = (uint32_t)conf->instr_type * num_descs;
        iq = oct->instr_queue[iq_no];
        iq->oct_dev = oct;

        max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs;

        error = bus_dma_tag_create(bus_get_dma_tag(oct->device),        /* parent */
                                   1, 0,                                /* alignment, bounds */
                                   BUS_SPACE_MAXADDR,                   /* lowaddr */
                                   BUS_SPACE_MAXADDR,                   /* highaddr */
                                   NULL, NULL,                          /* filter, filterarg */
                                   max_size,                            /* maxsize */
                                   LIO_MAX_SG,                          /* nsegments */
                                   PAGE_SIZE,                           /* maxsegsize */
                                   0,                                   /* flags */
                                   NULL,                                /* lockfunc */
                                   NULL,                                /* lockfuncarg */
                                   &iq->txtag);
        if (error) {
                lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
                            iq_no);
                return (1);
        }

        iq->base_addr = lio_dma_alloc(q_size, &iq->base_addr_dma);
        if (!iq->base_addr) {
                lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
                            iq_no);
                return (1);
        }

        iq->max_count = num_descs;

        /*
         * Initialize a list to hold requests that have been posted to
         * Octeon but have not yet been fetched by Octeon.
         */
        iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
                                  M_DEVBUF, M_NOWAIT | M_ZERO);
        if (iq->request_list == NULL) {
                lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n",
                            iq_no);
                return (1);
        }

        lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %lx count: %d\n",
                    iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

        /* Create the descriptor buffer dma maps */
        request_buf = iq->request_list;
        for (i = 0; i < num_descs; i++, request_buf++) {
                error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
                if (error) {
                        lio_dev_err(oct, "Unable to create TX DMA map\n");
                        return (1);
                }
        }

        iq->txpciq.txpciq64 = txpciq.txpciq64;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->octeon_read_index = 0;
        iq->flush_index = 0;
        iq->last_db_time = 0;
        iq->db_timeout = (uint32_t)conf->db_timeout;
        atomic_store_rel_int(&iq->instr_pending, 0);

        /* Initialize the lock for this instruction queue */
        mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
        mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
        mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);

        mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
                 MTX_DEF);

        oct->io_qmask.iq |= BIT_ULL(iq_no);

        /* Set the 32B/64B mode for each input queue */
        oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
        iq->iqcmd_64B = (conf->instr_type == 64);

        oct->fn_list.setup_iq_regs(oct, iq_no);

        db_tq = &oct->check_db_tq[iq_no];
        db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
                                     taskqueue_thread_enqueue, &db_tq->tq);
        if (db_tq->tq == NULL) {
                lio_dev_err(oct, "check db wq create failed for iq %d\n",
                            iq_no);
                return (1);
        }

        TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
                          (void *)db_tq);
        db_tq->ctxul = iq_no;
        db_tq->ctxptr = oct;

        taskqueue_start_threads(&db_tq->tq, 1, PI_NET,
                                "lio%d_check_db_timeout:%d",
                                oct->octeon_id, iq_no);
        taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);

        /* Allocate a buf ring */
        oct->instr_queue[iq_no]->br =
                buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
                               &oct->instr_queue[iq_no]->enq_lock);
        if (oct->instr_queue[iq_no]->br == NULL) {
                lio_dev_err(oct, "Critical Failure setting up buf ring\n");
                return (1);
        }

        return (0);
}

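/*
 * Tear down an instruction queue: cancel its doorbell-check taskqueue,
 * release any mbufs and DMA maps still held on the request list, and free
 * the buf ring, request list, DMA tag and descriptor ring.  Returns 0 if
 * the descriptor ring was freed, 1 otherwise.
 */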
int
lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no)
{
        struct lio_instr_queue          *iq = oct->instr_queue[iq_no];
        struct lio_request_list         *request_buf;
        struct lio_mbuf_free_info       *finfo;
        uint64_t                        desc_size = 0, q_size;
        int                             i;

        lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no);

        if (oct->check_db_tq[iq_no].tq != NULL) {
                while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq,
                                                &oct->check_db_tq[iq_no].work,
                                                NULL))
                        taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq,
                                                &oct->check_db_tq[iq_no].work);
                taskqueue_free(oct->check_db_tq[iq_no].tq);
                oct->check_db_tq[iq_no].tq = NULL;
        }

        if (LIO_CN23XX_PF(oct))
                desc_size =
                    LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));

        request_buf = iq->request_list;
        for (i = 0; i < iq->max_count; i++, request_buf++) {
                if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) ||
                    (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) {
                        if (request_buf->buf != NULL) {
                                finfo = request_buf->buf;
                                bus_dmamap_sync(iq->txtag, request_buf->map,
                                                BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(iq->txtag,
                                                  request_buf->map);
                                m_freem(finfo->mb);
                                request_buf->buf = NULL;
                                if (request_buf->map != NULL) {
                                        bus_dmamap_destroy(iq->txtag,
                                                           request_buf->map);
                                        request_buf->map = NULL;
                                }
                        } else if (request_buf->map != NULL) {
                                bus_dmamap_unload(iq->txtag, request_buf->map);
                                bus_dmamap_destroy(iq->txtag, request_buf->map);
                                request_buf->map = NULL;
                        }
                }
        }

        if (iq->br != NULL) {
                buf_ring_free(iq->br, M_DEVBUF);
                iq->br = NULL;
        }

        if (iq->request_list != NULL) {
                free(iq->request_list, M_DEVBUF);
                iq->request_list = NULL;
        }

        if (iq->txtag != NULL) {
                bus_dma_tag_destroy(iq->txtag);
                iq->txtag = NULL;
        }

        if (iq->base_addr) {
                q_size = iq->max_count * desc_size;
                lio_dma_free((uint32_t)q_size, iq->base_addr);

                oct->io_qmask.iq &= ~(1ULL << iq_no);
                bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue));
                oct->num_iqs--;

                return (0);
        }

        return (1);
}

/* Return 0 on success, 1 on failure */
int
lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index,
             union octeon_txpciq txpciq, uint32_t num_descs)
{
        uint32_t        iq_no = (uint32_t)txpciq.s.q_no;

        if (oct->instr_queue[iq_no]->oct_dev != NULL) {
                lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n",
                            iq_no);
                oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
                return (0);
        }

        oct->instr_queue[iq_no]->q_index = q_index;
        oct->instr_queue[iq_no]->ifidx = ifidx;

        if (lio_init_instr_queue(oct, txpciq, num_descs)) {
                lio_delete_instr_queue(oct, iq_no);
                return (1);
        }

        oct->num_iqs++;
        if (oct->fn_list.enable_io_queues(oct))
                return (1);

        return (0);
}

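/*
 * Wait for Octeon to fetch all posted instructions, kicking the doorbell
 * check on any queue that still has entries pending between retries.
 * Returns the number of instructions still pending once the retries are
 * exhausted, or 0 if every queue drained.
 */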
int
lio_wait_for_instr_fetch(struct octeon_device *oct)
{
        int     i, retry = 1000, pending, instr_cnt = 0;

        do {
                instr_cnt = 0;

                for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        pending = atomic_load_acq_int(
                                        &oct->instr_queue[i]->instr_pending);
                        if (pending)
                                __lio_check_db_timeout(oct, i);
                        instr_cnt += pending;
                }

                if (instr_cnt == 0)
                        break;

                lio_sleep_timeout(1);

        } while (retry-- && instr_cnt);

        return (instr_cnt);
}

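/*
 * Write the current fill count to the queue's doorbell register so Octeon
 * starts fetching the newly posted commands.  Only done while the device is
 * in the RUNNING state.
 */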
static inline void
lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
{

        if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) {
                lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
                /* make sure doorbell write goes through */
                __compiler_membar();
                iq->fill_cnt = 0;
                iq->last_db_time = ticks;
                return;
        }
}

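/* Copy a 32-byte or 64-byte command into the ring at the host write index. */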
static inline void
__lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
        uint8_t *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

        memcpy(iqptr, cmd, cmdsize);
}

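/*
 * Copy a command into the next free descriptor slot and account for it in
 * instr_pending.  Returns the slot index used and LIO_IQ_SEND_OK,
 * LIO_IQ_SEND_STOP (queue almost full) or LIO_IQ_SEND_FAILED (queue full).
 */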
static inline struct lio_iq_post_status
__lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
        struct lio_iq_post_status       st;

        st.status = LIO_IQ_SEND_OK;

        /*
         * This ensures that the read index does not wrap around to the same
         * position if the queue gets full before Octeon can fetch any
         * instruction.
         */
        if (atomic_load_acq_int(&iq->instr_pending) >=
            (int32_t)(iq->max_count - 1)) {
                st.status = LIO_IQ_SEND_FAILED;
                st.index = -1;
                return (st);
        }

        if (atomic_load_acq_int(&iq->instr_pending) >=
            (int32_t)(iq->max_count - 2))
                st.status = LIO_IQ_SEND_STOP;

        __lio_copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
                                              iq->max_count);
        iq->fill_cnt++;

        /*
         * Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        wmb();

        atomic_add_int(&iq->instr_pending, 1);

        return (st);
}

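/* Record the buffer and request type associated with a posted descriptor. */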
static inline void
__lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
                          int reqtype)
{

        iq->request_list[idx].buf = buf;
        iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
                            struct lio_instr_queue *iq, uint32_t budget)
{
        struct lio_soft_command         *sc;
        struct octeon_instr_irh         *irh = NULL;
        struct lio_mbuf_free_info       *finfo;
        void                            *buf;
        uint32_t                        inst_count = 0;
        uint32_t                        old = iq->flush_index;
        int                             reqtype;

        while (old != iq->octeon_read_index) {
                reqtype = iq->request_list[old].reqtype;
                buf = iq->request_list[old].buf;
                finfo = buf;

                if (reqtype == LIO_REQTYPE_NONE)
                        goto skip_this;

                switch (reqtype) {
                case LIO_REQTYPE_NORESP_NET:
                        lio_free_mbuf(iq, buf);
                        break;
                case LIO_REQTYPE_NORESP_NET_SG:
                        lio_free_sgmbuf(iq, buf);
                        break;
                case LIO_REQTYPE_RESP_NET:
                case LIO_REQTYPE_SOFT_COMMAND:
                        sc = buf;
                        if (LIO_CN23XX_PF(oct))
                                irh = (struct octeon_instr_irh *)
                                        &sc->cmd.cmd3.irh;
                        if (irh->rflag) {
                                /*
                                 * We're expecting a response from Octeon.
                                 * Add sc to the ordered soft command
                                 * response list; lio_process_ordered_list()
                                 * will process it when the response arrives.
                                 */
                                mtx_lock(&oct->response_list
                                         [LIO_ORDERED_SC_LIST].lock);
                                atomic_add_int(&oct->response_list
                                               [LIO_ORDERED_SC_LIST].
                                               pending_req_count, 1);
                                STAILQ_INSERT_TAIL(&oct->response_list
                                                   [LIO_ORDERED_SC_LIST].
                                                   head, &sc->node, entries);
                                mtx_unlock(&oct->response_list
                                           [LIO_ORDERED_SC_LIST].lock);
                        } else {
                                if (sc->callback != NULL) {
                                        /* This callback must not sleep */
                                        sc->callback(oct, LIO_REQUEST_DONE,
                                                     sc->callback_arg);
                                }
                        }

                        break;
                default:
                        lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n",
                                    __func__, reqtype, buf, old);
                }

                iq->request_list[old].buf = NULL;
                iq->request_list[old].reqtype = 0;

skip_this:
                inst_count++;
                old = lio_incr_index(old, 1, iq->max_count);

                if ((budget) && (inst_count >= budget))
                        break;
        }

        iq->flush_index = old;

        return (inst_count);
}

/* Can only be called from process context */
int
lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
             uint32_t budget)
{
        uint32_t        inst_processed = 0;
        uint32_t        tot_inst_processed = 0;
        int             tx_done = 1;

        if (!mtx_trylock(&iq->iq_flush_running_lock))
                return (tx_done);

        mtx_lock(&iq->lock);

        iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

        do {
                /* Process any outstanding IQ packets. */
                if (iq->flush_index == iq->octeon_read_index)
                        break;

                if (budget)
                        inst_processed =
                                lio_process_iq_request_list(oct, iq,
                                                            budget -
                                                            tot_inst_processed);
                else
                        inst_processed =
                                lio_process_iq_request_list(oct, iq, 0);

                if (inst_processed) {
                        atomic_subtract_int(&iq->instr_pending, inst_processed);
                        iq->stats.instr_processed += inst_processed;
                }
                tot_inst_processed += inst_processed;
                inst_processed = 0;

        } while (tot_inst_processed < budget);

        if (budget && (tot_inst_processed >= budget))
                tx_done = 0;

        iq->last_db_time = ticks;

        mtx_unlock(&iq->lock);

        mtx_unlock(&iq->iq_flush_running_lock);

        return (tx_done);
}

/*
 * Process instruction queue after timeout.
 * This routine gets called from a taskqueue or when removing the module.
 */
static void
__lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no)
{
        struct lio_instr_queue  *iq;
        uint64_t                next_time;

        if (oct == NULL)
                return;

        iq = oct->instr_queue[iq_no];
        if (iq == NULL)
                return;

        if (atomic_load_acq_int(&iq->instr_pending)) {
                /* If ticks - last_db_time < db_timeout do nothing */
                next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
                if (!lio_check_timeout(ticks, next_time))
                        return;

                iq->last_db_time = ticks;

                /* Flush the instruction queue */
                lio_flush_iq(oct, iq, 0);

                lio_enable_irq(NULL, iq);
        }

        if (oct->props.ifp != NULL && iq->br != NULL) {
                if (mtx_trylock(&iq->enq_lock)) {
                        if (!drbr_empty(oct->props.ifp, iq->br))
                                lio_mq_start_locked(oct->props.ifp, iq);

                        mtx_unlock(&iq->enq_lock);
                }
        }
}

/*
 * Called from the per-queue doorbell-check taskqueue at regular intervals to
 * check the instruction queue for commands to be posted and for commands
 * that were fetched by Octeon.
 */
static void
lio_check_db_timeout(void *arg, int pending)
{
        struct lio_tq           *db_tq = (struct lio_tq *)arg;
        struct octeon_device    *oct = db_tq->ctxptr;
        uint64_t                iq_no = db_tq->ctxul;
        uint32_t                delay = 10;

        __lio_check_db_timeout(oct, iq_no);
        taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
                                  lio_ms_to_ticks(delay));
}

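/*
 * Post a single command to instruction queue iq_no under the post lock,
 * record the associated buffer and request type, and ring the doorbell if
 * force_db is set or the queue is nearly full.  Returns the LIO_IQ_SEND_*
 * status of the post attempt.
 */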
int
lio_send_command(struct octeon_device *oct, uint32_t iq_no,
                 uint32_t force_db, void *cmd, void *buf,
                 uint32_t datasize, uint32_t reqtype)
{
        struct lio_iq_post_status       st;
        struct lio_instr_queue          *iq = oct->instr_queue[iq_no];

        /*
         * Get the lock and prevent other tasks and tx interrupt handler
         * from running.
         */
        mtx_lock(&iq->post_lock);

        st = __lio_post_command2(iq, cmd);

        if (st.status != LIO_IQ_SEND_FAILED) {
                __lio_add_to_request_list(iq, st.index, buf, reqtype);
                LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
                LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

                if (force_db || (st.status == LIO_IQ_SEND_STOP))
                        lio_ring_doorbell(oct, iq);
        } else {
                LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
        }

        mtx_unlock(&iq->post_lock);

        /*
         * This is only done here to expedite packets being flushed for
         * cases where there are no IQ completion interrupts.
         */

        return (st.status);
}

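/*
 * Fill in the command words of a soft command (CN23XX PF only): the IH3 and
 * PKI_IH3 headers, opcode/subcode and the opcode-specific parameters, plus
 * the RDP words and rflag when a response buffer is attached.
 */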
void
lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc,
                         uint8_t opcode, uint8_t subcode, uint32_t irh_ossp,
                         uint64_t ossp0, uint64_t ossp1)
{
        struct lio_config               *lio_cfg;
        struct octeon_instr_ih3         *ih3;
        struct octeon_instr_pki_ih3     *pki_ih3;
        struct octeon_instr_irh         *irh;
        struct octeon_instr_rdp         *rdp;

        KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__));
        KASSERT(subcode <= 127, ("%s, %d, subcode > 127", __func__, __LINE__));

        lio_cfg = lio_get_conf(oct);

        if (LIO_CN23XX_PF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

                ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

                pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

                pki_ih3->w = 1;
                pki_ih3->raw = 1;
                pki_ih3->utag = 1;
                pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
                pki_ih3->utt = 1;
                pki_ih3->tag = LIO_CONTROL;
                pki_ih3->tagtype = LIO_ATOMIC_TAG;
                pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
                pki_ih3->pm = 0x7;
                pki_ih3->sl = 8;

                if (sc->datasize)
                        ih3->dlengsz = sc->datasize;

                irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                irh->opcode = opcode;
                irh->subcode = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp = irh_ossp;
                sc->cmd.cmd3.ossp[0] = ossp0;
                sc->cmd.cmd3.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen = sc->rdatasize;

                        irh->rflag = 1;
                        /* PKI IH3 */
                        /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
                        ih3->fsz = LIO_SOFTCMDRESP_IH3;
                } else {
                        irh->rflag = 0;
                        /* PKI IH3 */
                        /* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
                        ih3->fsz = LIO_PCICMD_O3;
                }
        }
}

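/*
 * Attach the DMA data and response pointers of a prepared soft command,
 * initialize its completion word and timeout, and post it to its instruction
 * queue with the doorbell forced.
 */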
int
lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc)
{
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        uint32_t                len = 0;

        if (LIO_CN23XX_PF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
                if (ih3->dlengsz) {
                        KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL",
                                              __func__, __LINE__));
                        sc->cmd.cmd3.dptr = sc->dmadptr;
                }

                irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                if (irh->rflag) {
                        KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL",
                                              __func__, __LINE__));
                        KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL",
                                                  __func__, __LINE__));
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd3.rptr = sc->dmarptr;
                }
                len = (uint32_t)ih3->dlengsz;
        }
        if (sc->wait_time)
                sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time);

        return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
                                 len, LIO_REQTYPE_SOFT_COMMAND));
}

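/*
 * Pre-allocate LIO_MAX_SOFT_COMMAND_BUFFERS DMA-able soft command buffers
 * and place them on the device's free pool.  Returns 0 on success, 1 if an
 * allocation fails (buffers allocated so far are freed).
 */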
int
lio_setup_sc_buffer_pool(struct octeon_device *oct)
{
        struct lio_soft_command *sc;
        uint64_t                dma_addr;
        int                     i;

        STAILQ_INIT(&oct->sc_buf_pool.head);
        mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF);
        atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0);

        for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) {
                sc = (struct lio_soft_command *)
                        lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, &dma_addr);
                if (sc == NULL) {
                        lio_free_sc_buffer_pool(oct);
                        return (1);
                }

                sc->dma_addr = dma_addr;
                sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;

                STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
        }

        return (0);
}

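/* Release every buffer in the soft command pool back to the DMA allocator. */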
int
lio_free_sc_buffer_pool(struct octeon_device *oct)
{
        struct lio_stailq_node  *tmp, *tmp2;
        struct lio_soft_command *sc;

        mtx_lock(&oct->sc_buf_pool.lock);

        STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) {
                sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head,
                                            struct lio_soft_command, node);

                STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries);

                lio_dma_free(sc->size, sc);
        }

        STAILQ_INIT(&oct->sc_buf_pool.head);

        mtx_unlock(&oct->sc_buf_pool.lock);

        return (0);
}

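/*
 * Take a buffer from the soft command pool and carve it into the command
 * structure, an optional context area, and 128-byte aligned data and
 * response regions.  Returns NULL if the pool is empty.
 */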
struct lio_soft_command *
lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize,
                       uint32_t rdatasize, uint32_t ctxsize)
{
        struct lio_soft_command *sc = NULL;
        struct lio_stailq_node  *tmp;
        uint64_t                dma_addr;
        uint32_t                size;
        uint32_t                offset = sizeof(struct lio_soft_command);

        KASSERT((offset + datasize + rdatasize + ctxsize) <=
                LIO_SOFT_COMMAND_BUFFER_SIZE,
                ("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE",
                 __func__, __LINE__));

        mtx_lock(&oct->sc_buf_pool.lock);

        if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) {
                mtx_unlock(&oct->sc_buf_pool.lock);
                return (NULL);
        }
        tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries);

        STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries);

        atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1);

        mtx_unlock(&oct->sc_buf_pool.lock);

        sc = (struct lio_soft_command *)tmp;

        dma_addr = sc->dma_addr;
        size = sc->size;

        bzero(sc, sc->size);

        sc->dma_addr = dma_addr;
        sc->size = size;

        if (ctxsize) {
                sc->ctxptr = (uint8_t *)sc + offset;
                sc->ctxsize = ctxsize;
        }

        /* Start data at 128 byte boundary */
        offset = (offset + ctxsize + 127) & 0xffffff80;

        if (datasize) {
                sc->virtdptr = (uint8_t *)sc + offset;
                sc->dmadptr = dma_addr + offset;
                sc->datasize = datasize;
        }
        /* Start rdata at 128 byte boundary */
        offset = (offset + datasize + 127) & 0xffffff80;

        if (rdatasize) {
                KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__,
                                          __LINE__));
                sc->virtrptr = (uint8_t *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
                sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
                                               rdatasize - 8);
        }
        return (sc);
}

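/* Return a soft command buffer to the free pool. */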
void
lio_free_soft_command(struct octeon_device *oct,
                      struct lio_soft_command *sc)
{

        mtx_lock(&oct->sc_buf_pool.lock);

        STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);

        atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1);

        mtx_unlock(&oct->sc_buf_pool.lock);
}