/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

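/*
 * Callback argument for xdma_dmamap_cb(): the caller's segment array is
 * filled in by the callback, which also records the segment count and any
 * bus_dmamap_load() error.
 */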
struct seg_load_request {
        struct bus_dma_segment *seg;
        uint32_t nsegs;
        uint32_t error;
};

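/*
 * Release the per-request bounce buffers: unmap and free the KVA, then
 * return the physical ranges to the channel's vmem arena.
 */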
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
        struct xdma_request *xr;
        vm_size_t size;
        int i;

        for (i = 0; i < xchan->xr_num; i++) {
                xr = &xchan->xr_mem[i];
                size = xr->buf.size;
                if (xr->buf.vaddr) {
                        pmap_kremove_device(xr->buf.vaddr, size);
                        kva_free(xr->buf.vaddr, size);
                        xr->buf.vaddr = 0;
                }
                if (xr->buf.paddr) {
                        vmem_free(xchan->vmem, xr->buf.paddr, size);
                        xr->buf.paddr = 0;
                }
                xr->buf.size = 0;
        }
}

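/*
 * Allocate one bounce buffer per request slot from the channel's vmem
 * arena and map it into KVA. This is the non-busdma (XCHAN_CAP_BOUNCE)
 * path.
 */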
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
        xdma_controller_t *xdma;
        struct xdma_request *xr;
        vmem_addr_t addr;
        vm_size_t size;
        int i;

        xdma = xchan->xdma;

        if (xchan->vmem == NULL)
                return (ENOBUFS);

        for (i = 0; i < xchan->xr_num; i++) {
                xr = &xchan->xr_mem[i];
                size = round_page(xchan->maxsegsize);
                if (vmem_alloc(xchan->vmem, size,
                    M_BESTFIT | M_NOWAIT, &addr)) {
                        device_printf(xdma->dev,
                            "%s: Can't allocate memory\n", __func__);
                        xchan_bufs_free_reserved(xchan);
                        return (ENOMEM);
                }

                xr->buf.size = size;
                xr->buf.paddr = addr;
                xr->buf.vaddr = kva_alloc(size);
                if (xr->buf.vaddr == 0) {
                        device_printf(xdma->dev,
                            "%s: Can't allocate KVA\n", __func__);
                        xchan_bufs_free_reserved(xchan);
                        return (ENOMEM);
                }
                pmap_kenter_device(xr->buf.vaddr, size, addr);
        }

        return (0);
}

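/*
 * Create the bus_dma tag describing the channel's addressing constraints
 * and a DMA map for each request slot (XCHAN_CAP_BUSDMA case).
 */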
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
        xdma_controller_t *xdma;
        struct xdma_request *xr;
        int err;
        int i;

        xdma = xchan->xdma;

        /* Create bus_dma tag */
        err = bus_dma_tag_create(
            bus_get_dma_tag(xdma->dev), /* Parent tag. */
            xchan->alignment,           /* alignment */
            xchan->boundary,            /* boundary */
            xchan->lowaddr,             /* lowaddr */
            xchan->highaddr,            /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
            xchan->maxnsegs,            /* nsegments */
            xchan->maxsegsize,          /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &xchan->dma_tag_bufs);
        if (err != 0) {
                device_printf(xdma->dev,
                    "%s: Can't create bus_dma tag.\n", __func__);
                return (-1);
        }

        for (i = 0; i < xchan->xr_num; i++) {
                xr = &xchan->xr_mem[i];
                err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
                    &xr->buf.map);
                if (err != 0) {
                        device_printf(xdma->dev,
                            "%s: Can't create buf DMA map.\n", __func__);

                        /* Cleanup: destroy the maps created so far. */
                        while (--i >= 0)
                                bus_dmamap_destroy(xchan->dma_tag_bufs,
                                    xchan->xr_mem[i].buf.map);
                        bus_dma_tag_destroy(xchan->dma_tag_bufs);

                        return (-1);
                }
        }

        return (0);
}

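/*
 * Allocate the per-request buffers appropriate for the channel
 * capabilities: busdma maps, or reserved bounce buffers otherwise.
 */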
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
        xdma_controller_t *xdma;
        int ret;

        xdma = xchan->xdma;

        if (xdma == NULL) {
                /* No controller to print through: xdma is NULL here. */
                printf("%s: Channel was not allocated properly.\n", __func__);
                return (-1);
        }

        if (xchan->caps & XCHAN_CAP_BUSDMA)
                ret = xchan_bufs_alloc_busdma(xchan);
        else
                ret = xchan_bufs_alloc_reserved(xchan);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't allocate bufs.\n", __func__);
                return (-1);
        }

        xchan->flags |= XCHAN_BUFS_ALLOCATED;

        return (0);
}

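/*
 * Free whatever xchan_bufs_alloc() set up: the DMA maps and tag for
 * busdma channels, the reserved bounce buffers otherwise.
 */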
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
        struct xdma_request *xr;
        struct xchan_buf *b;
        int i;

        if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
                return (-1);

        if (xchan->caps & XCHAN_CAP_BUSDMA) {
                for (i = 0; i < xchan->xr_num; i++) {
                        xr = &xchan->xr_mem[i];
                        b = &xr->buf;
                        bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
                }
                bus_dma_tag_destroy(xchan->dma_tag_bufs);
        } else
                xchan_bufs_free_reserved(xchan);

        xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

        return (0);
}

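/*
 * Tear down the scatter-gather state of a channel: buffers, sglist and
 * the request bank.
 */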
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

        xchan_bufs_free(xchan);
        xchan_sglist_free(xchan);
        xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue.
 * maxsegsize - maximum allowed size in bytes of a single scatter-gather
 *     list element.
 * maxnsegs - maximum number of scatter-gather list elements per request.
 * alignment, boundary, lowaddr, highaddr - bus_dma tag constraints for
 *     the transfer buffers.
 */
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
        xdma_controller_t *xdma;
        int ret;

        xdma = xchan->xdma;

        KASSERT(xdma != NULL, ("xdma is NULL"));

        if (xchan->flags & XCHAN_CONFIGURED) {
                device_printf(xdma->dev,
                    "%s: Channel is already configured.\n", __func__);
                return (-1);
        }

        xchan->xr_num = xr_num;
        xchan->maxsegsize = maxsegsize;
        xchan->maxnsegs = maxnsegs;
        xchan->alignment = alignment;
        xchan->boundary = boundary;
        xchan->lowaddr = lowaddr;
        xchan->highaddr = highaddr;

        if (xchan->maxnsegs > XDMA_MAX_SEG) {
                device_printf(xdma->dev, "%s: maxnsegs is too big\n",
                    __func__);
                return (-1);
        }

        xchan_bank_init(xchan);

        /* Allocate sglist. */
        ret = xchan_sglist_alloc(xchan);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't allocate sglist.\n", __func__);
                return (-1);
        }

        /* Allocate buffers if required. */
        if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
                ret = xchan_bufs_alloc(xchan);
                if (ret != 0) {
                        device_printf(xdma->dev,
                            "%s: Can't allocate bufs.\n", __func__);

                        /* Cleanup */
                        xchan_sglist_free(xchan);
                        xchan_bank_free(xchan);

                        return (-1);
                }
        }

        xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

        XCHAN_LOCK(xchan);
        ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't prepare SG transfer.\n", __func__);
                XCHAN_UNLOCK(xchan);

                return (-1);
        }
        XCHAN_UNLOCK(xchan);

        return (0);
}

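/*
 * Called by the DMA engine driver for each completed segment. Once the
 * last segment of a request is done, sync and unload the DMA map (or copy
 * the bounce buffer back for device-to-memory mbuf requests), record the
 * transfer status, and move the request from processing to queue_out.
 */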
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
        struct xdma_request *xr;
        xdma_controller_t *xdma;
        struct xchan_buf *b;

        xdma = xchan->xdma;

        xr = TAILQ_FIRST(&xchan->processing);
        if (xr == NULL)
                panic("request not found");

        b = &xr->buf;

        atomic_subtract_int(&b->nsegs_left, 1);

        if (b->nsegs_left == 0) {
                if (xchan->caps & XCHAN_CAP_BUSDMA) {
                        if (xr->direction == XDMA_MEM_TO_DEV)
                                bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
                                    BUS_DMASYNC_POSTWRITE);
                        else
                                bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
                                    BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
                } else if (xchan->caps & XCHAN_CAP_BOUNCE) {
                        if (xr->req_type == XR_TYPE_MBUF &&
                            xr->direction == XDMA_DEV_TO_MEM)
                                m_copyback(xr->m, 0, st->transferred,
                                    (void *)xr->buf.vaddr);
                }
                xr->status.error = st->error;
                xr->status.transferred = st->transferred;

                QUEUE_PROC_LOCK(xchan);
                TAILQ_REMOVE(&xchan->processing, xr, xr_next);
                QUEUE_PROC_UNLOCK(xchan);

                QUEUE_OUT_LOCK(xchan);
                TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
                QUEUE_OUT_UNLOCK(xchan);
        }
}

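/*
 * bus_dmamap_load() callback: copy the resulting segments into the
 * seg_load_request so the caller can use them after the load returns.
 */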
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct seg_load_request *slr;
        struct bus_dma_segment *seg;
        int i;

        slr = arg;
        seg = slr->seg;

        if (error != 0) {
                slr->error = error;
                return;
        }

        slr->nsegs = nsegs;

        for (i = 0; i < nsegs; i++) {
                seg[i].ds_addr = segs[i].ds_addr;
                seg[i].ds_len = segs[i].ds_len;
        }
}

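/*
 * Load an mbuf, bio or virtual-address request with busdma and sync the
 * map for the upcoming transfer. Returns the number of segments loaded,
 * or 0 on failure so the caller retries later.
 */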
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
        xdma_controller_t *xdma;
        struct seg_load_request slr;
        uint32_t nsegs;
        void *addr;
        int error;

        xdma = xchan->xdma;

        error = 0;
        nsegs = 0;

        switch (xr->req_type) {
        case XR_TYPE_MBUF:
                error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
                    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
                break;
        case XR_TYPE_BIO:
                slr.nsegs = 0;
                slr.error = 0;
                slr.seg = seg;
                error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
                    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
                if (slr.error != 0) {
                        device_printf(xdma->dma_dev,
                            "%s: bus_dmamap_load failed, err %d\n",
                            __func__, slr.error);
                        return (0);
                }
                nsegs = slr.nsegs;
                break;
        case XR_TYPE_VIRT:
                switch (xr->direction) {
                case XDMA_MEM_TO_DEV:
                        addr = (void *)xr->src_addr;
                        break;
                case XDMA_DEV_TO_MEM:
                        addr = (void *)xr->dst_addr;
                        break;
                default:
                        device_printf(xdma->dma_dev,
                            "%s: Direction is not supported\n", __func__);
                        return (0);
                }
                slr.nsegs = 0;
                slr.error = 0;
                slr.seg = seg;
                error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
                    addr, (xr->block_len * xr->block_num),
                    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
                if (slr.error != 0) {
                        device_printf(xdma->dma_dev,
                            "%s: bus_dmamap_load failed, err %d\n",
                            __func__, slr.error);
                        return (0);
                }
                nsegs = slr.nsegs;
                break;
        default:
                break;
        }

        if (error != 0) {
                if (error == ENOMEM) {
                        /*
                         * Out of memory. Try again later.
                         * TODO: count errors.
                         */
                } else
                        device_printf(xdma->dma_dev,
                            "%s: bus_dmamap_load failed with err %d\n",
                            __func__, error);
                return (0);
        }

        if (xr->direction == XDMA_MEM_TO_DEV)
                bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
                    BUS_DMASYNC_PREWRITE);
        else
                bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
                    BUS_DMASYNC_PREREAD);

        return (nsegs);
}

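/*
 * Non-busdma variant: describe the request as a single segment, copying
 * mbuf data into the bounce buffer for memory-to-device transfers.
 * Only mbuf requests are handled so far.
 */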
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
        struct mbuf *m;
        uint32_t nsegs;

        m = xr->m;

        nsegs = 1;

        switch (xr->req_type) {
        case XR_TYPE_MBUF:
                if (xchan->caps & XCHAN_CAP_BUSDMA)
                        seg[0].ds_addr = mtod(m, bus_addr_t);
                else if (xchan->caps & XCHAN_CAP_BOUNCE) {
                        if (xr->direction == XDMA_MEM_TO_DEV)
                                m_copydata(m, 0, m->m_pkthdr.len,
                                    (void *)xr->buf.vaddr);
                        seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
                }
                seg[0].ds_len = m->m_pkthdr.len;
                break;
        case XR_TYPE_BIO:
        case XR_TYPE_VIRT:
        default:
                panic("implement me");
        }

        return (nsegs);
}

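/*
 * Build the segment list for a request using either the busdma or the
 * bounce-buffer path and remember the segment count in the request.
 */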
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
        int nsegs;

        nsegs = 0;

        if (xchan->caps & XCHAN_CAP_BUSDMA)
                nsegs = _xdma_load_data_busdma(xchan, xr, seg);
        else
                nsegs = _xdma_load_data(xchan, xr, seg);
        if (nsegs == 0)
                return (0); /* Try again later. */

        xr->buf.nsegs = nsegs;
        xr->buf.nsegs_left = nsegs;

        return (nsegs);
}

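/*
 * Drain the input queue into the sglist, stopping once the DMA engine
 * capacity or the sglist itself would be exceeded. Each converted
 * request is moved to the processing queue. Returns the number of
 * sglist entries produced, or -1 on error.
 */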
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
        struct bus_dma_segment seg[XDMA_MAX_SEG];
        struct xdma_request *xr;
        struct xdma_request *xr_tmp;
        xdma_controller_t *xdma;
        uint32_t capacity;
        uint32_t n;
        uint32_t c;
        int nsegs;
        int ret;

        XCHAN_ASSERT_LOCKED(xchan);

        xdma = xchan->xdma;

        n = 0;
        c = 0;

        ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't get DMA controller capacity.\n", __func__);
                return (-1);
        }

        TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
                switch (xr->req_type) {
                case XR_TYPE_MBUF:
                        if ((xchan->caps & XCHAN_CAP_NOSEG) ||
                            (c > xchan->maxnsegs))
                                c = xdma_mbuf_defrag(xchan, xr);
                        break;
                case XR_TYPE_BIO:
                case XR_TYPE_VIRT:
                default:
                        c = 1;
                }

                if (capacity <= (c + n)) {
                        /*
                         * No space yet available for the entire
                         * request in the DMA engine.
                         */
                        break;
                }

                if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
                        /* Sglist is full. */
                        break;
                }

                nsegs = xdma_load_data(xchan, xr, seg);
                if (nsegs == 0)
                        break;

                xdma_sglist_add(&sg[n], seg, nsegs, xr);
                n += nsegs;

                QUEUE_IN_LOCK(xchan);
                TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
                QUEUE_IN_UNLOCK(xchan);

                QUEUE_PROC_LOCK(xchan);
                TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
                QUEUE_PROC_UNLOCK(xchan);
        }

        return (n);
}

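/*
 * Convert the queued requests into an sglist and hand it to the DMA
 * engine driver. Called with the channel lock held.
 */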
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
        struct xdma_sglist *sg;
        xdma_controller_t *xdma;
        int sg_n;
        int ret;

        xdma = xchan->xdma;
        KASSERT(xdma != NULL, ("xdma is NULL"));

        XCHAN_ASSERT_LOCKED(xchan);

        sg = xchan->sg;

        if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
            (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
                device_printf(xdma->dev,
                    "%s: Can't submit a transfer: no bufs\n",
                    __func__);
                return (-1);
        }

        sg_n = xdma_process(xchan, sg);
        if (sg_n < 0)
                return (-1); /* Could not build an sglist. */
        if (sg_n == 0)
                return (0); /* Nothing to submit */

        /* Now submit sglist to DMA engine driver. */
        ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
        if (ret != 0) {
                device_printf(xdma->dev,
                    "%s: Can't submit an sglist.\n", __func__);
                return (-1);
        }

        return (0);
}