/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  As
 * this code currently assumes it can access internal members of
 * opaque types like bus_dma_tag_t and bus_dmamap, it is #include'd in
 * backends rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   - dmat_alignment()
 *   - dmat_flags()
 *   - dmat_lowaddr()
 *   - dmat_lockfunc()
 *   - dmat_lockfuncarg()
 */

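/*
 * Illustrative sketch only: glue a hypothetical backend might supply before
 * #include'ing this file.  The "common" field layout and the include path
 * below are made-up names for the example; each backend defines its own
 * accessors around its real tag structure.
 *
 *	MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
 *	SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
 *	    "Busdma parameters");
 *
 *	#define	dmat_alignment(dmat)	((dmat)->common.alignment)
 *	#define	dmat_flags(dmat)	((dmat)->common.flags)
 *	#define	dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
 *	#define	dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
 *	#define	dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)
 *
 *	#include "subr_busdma_bounce.c"
 */
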
#include <sys/kthread.h>
#include <sys/sched.h>

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
#if defined(__amd64__) || defined(__i386__)
        vm_page_t       datapage[2];    /* physical page(s) of client data */
#else
        vm_page_t       datapage;       /* physical page of client data */
#endif
        vm_offset_t     dataoffs;       /* page offset of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(, bounce_page) bounce_page_list;
        STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             map_count;
#ifdef dmat_domain
        int             domain;
#endif
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

static void busdma_thread(void *);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);

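/*
 * Reserve the bounce pages a map needs before a load can proceed.  With
 * BUS_DMA_NOWAIT the reservation either succeeds immediately or fails with
 * ENOMEM; otherwise the map is queued on its zone's waiting list and
 * EINPROGRESS is returned so the load can be deferred.
 */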
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
        struct bounce_zone *bz;

        /* Reserve Necessary Bounce Pages */
        mtx_lock(&bounce_lock);
        if (flags & BUS_DMA_NOWAIT) {
                if (reserve_bounce_pages(dmat, map, 0) != 0) {
                        map->pagesneeded = 0;
                        mtx_unlock(&bounce_lock);
                        return (ENOMEM);
                }
        } else {
                if (reserve_bounce_pages(dmat, map, 1) != 0) {
                        /* Queue us for resources */
                        bz = dmat->bounce_zone;
                        STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
                            links);
                        mtx_unlock(&bounce_lock);
                        return (EINPROGRESS);
                }
        }
        mtx_unlock(&bounce_lock);

        return (0);
}

static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

        return (bz->sysctl_tree_top);
}

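/*
 * Find an existing bounce zone compatible with the tag's alignment, lowaddr
 * (and, where supported, memory domain) restrictions, or create a new zone
 * along with its sysctl statistics nodes.  The busdma worker thread is
 * started when the first zone is created.
 */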
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;
        bool start_thread;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
                    dmat_domain(dmat) == bz->domain &&
#endif
                    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        STAILQ_INIT(&bz->bounce_map_waitinglist);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat_lowaddr(dmat);
        bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
        bz->map_count = 0;
#ifdef dmat_domain
        bz->domain = dmat_domain(dmat);
#endif
        snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
            (uintmax_t)bz->lowaddr);
        start_thread = STAILQ_EMPTY(&bounce_zone_list);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests (pages bounced)");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "domain", CTLFLAG_RD, &bz->domain, 0,
            "memory domain");
#endif

        if (start_thread) {
                if (kproc_create(busdma_thread, NULL, NULL, 0, 0, "busdma") !=
                    0)
                        printf("failed to create busdma thread");
        }
        return (0);
}

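/*
 * Allocate up to numpages bounce pages for the tag's zone and place them on
 * the zone's free list.  Returns the number of pages actually allocated;
 * allocation stops early if either the page metadata or the DMA-reachable
 * page itself cannot be obtained.
 */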
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

#ifdef dmat_domain
                bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
                    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
                bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif

                if (bpage == NULL)
                        break;
#ifdef dmat_domain
                bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
                    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
                    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
                    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
                if (bpage->vaddr == 0) {
                        free(bpage, M_BUSDMA);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

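/*
 * Move free pages from the zone to a map's reservation, up to the number the
 * map still needs.  Returns the number of pages still outstanding (zero when
 * the reservation is complete); with commit == 0, nothing is reserved unless
 * the request can be satisfied in full.  Called with the bounce lock held.
 */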
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

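/*
 * Take one previously reserved page off the zone's free list, record where
 * the client data lives so it can be copied to and from the bounce buffer at
 * dmamap sync time, and return the bounce buffer's bus address for use in
 * the DMA segment.  On amd64/i386 a single bounce page may cover client data
 * split across two physical pages.
 */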
#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
        KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef __riscv
        KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
            ("add_bounce_page: bad map %p", map));
#endif

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any reserved pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
                bpage->vaddr |= addr1 & PAGE_MASK;
                bpage->busaddr |= addr1 & PAGE_MASK;
                KASSERT(addr2 == 0,
            ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
                bpage->vaddr |= addr & PAGE_MASK;
                bpage->busaddr |= addr & PAGE_MASK;
#endif
        }
        bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
        bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
        KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
        bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
        bpage->dataoffs = addr1 & PAGE_MASK;
#else
        bpage->datapage = PHYS_TO_VM_PAGE(addr);
        bpage->dataoffs = addr & PAGE_MASK;
#endif
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

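/*
 * Return a map's bounce pages to its zone's free list and hand the freed
 * pages to maps waiting on the zone.  Maps whose reservations are now
 * complete are moved to the callback list and the busdma thread is woken up
 * to re-run their deferred loads.
 */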
static void
free_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;
        struct bounce_zone *bz;
        bool schedule_thread;
        u_int count;

        if (STAILQ_EMPTY(&map->bpages))
                return;

        bz = dmat->bounce_zone;
        count = 0;
        schedule_thread = false;
        STAILQ_FOREACH(bpage, &map->bpages, links) {
                bpage->datavaddr = 0;
                bpage->datacount = 0;

                if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
                        /*
                         * Reset the bounce page to start at offset 0.
                         * Other uses of this bounce page may need to
                         * store a full page of data and/or assume it
                         * starts on a page boundary.
                         */
                        bpage->vaddr &= ~PAGE_MASK;
                        bpage->busaddr &= ~PAGE_MASK;
                }
                count++;
        }

        mtx_lock(&bounce_lock);
        STAILQ_CONCAT(&bz->bounce_page_list, &map->bpages);
        bz->free_bpages += count;
        bz->active_bpages -= count;
        while ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) != 0)
                        break;

                STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
                STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
                bz->total_deferred++;
                schedule_thread = true;
        }
        mtx_unlock(&bounce_lock);
        if (schedule_thread)
                wakeup(&bounce_map_callbacklist);
}

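/*
 * Worker thread that runs deferred dmamap loads once bounce pages become
 * available.  It drains the global callback list and re-issues each deferred
 * load with the tag's lock function held around the operation.
 */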
static void
busdma_thread(void *dummy __unused)
{
        STAILQ_HEAD(, bus_dmamap) callbacklist;
        bus_dma_tag_t dmat;
        struct bus_dmamap *map, *nmap;

        thread_lock(curthread);
        sched_class(curthread, PRI_ITHD);
        sched_ithread_prio(curthread, PI_SWI(SWI_BUSDMA));
        thread_unlock(curthread);
        for (;;) {
                mtx_lock(&bounce_lock);
                while (STAILQ_EMPTY(&bounce_map_callbacklist))
                        mtx_sleep(&bounce_map_callbacklist, &bounce_lock, 0,
                            "-", 0);
                STAILQ_INIT(&callbacklist);
                STAILQ_CONCAT(&callbacklist, &bounce_map_callbacklist);
                mtx_unlock(&bounce_lock);

                STAILQ_FOREACH_SAFE(map, &callbacklist, links, nmap) {
                        dmat = map->dmat;
                        dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
                            BUS_DMA_LOCK);
                        bus_dmamap_load_mem(map->dmat, map, &map->mem,
                            map->callback, map->callback_arg, BUS_DMA_WAITOK);
                        dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
                            BUS_DMA_UNLOCK);
                }
        }
}