/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  As
 * this code currently assumes it can access internal members of
 * opaque types like bus_dma_tag_t and bus_dmamap, it is #include'd in
 * backends rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   - dmat_alignment()
 *   - dmat_flags()
 *   - dmat_lowaddr()
 *   - dmat_lockfunc()
 *   - dmat_lockfuncarg()
 */

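/*
 * Illustrative sketch (not part of this file): a backend satisfies the
 * prerequisites above by defining the accessor macros over its private
 * tag layout and then #include'ing this file, along the lines of (the
 * "common" field name below is hypothetical):
 *
 *      #define dmat_alignment(dmat)    ((dmat)->common.alignment)
 *      #define dmat_flags(dmat)        ((dmat)->common.flags)
 *      #define dmat_lowaddr(dmat)      ((dmat)->common.lowaddr)
 *      #define dmat_lockfunc(dmat)     ((dmat)->common.lockfunc)
 *      #define dmat_lockfuncarg(dmat)  ((dmat)->common.lockfuncarg)
 *
 *      #include "../../kern/subr_busdma_bounce.c"
 *
 * Defining dmat_domain() as well enables the NUMA domain-aware code
 * paths guarded by #ifdef dmat_domain below.
 */
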
struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
#if defined(__amd64__) || defined(__i386__)
        vm_page_t       datapage[2];    /* physical page(s) of client data */
#else
        vm_page_t       datapage;       /* physical page of client data */
#endif
        vm_offset_t     dataoffs;       /* page offset of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(, bounce_page) bounce_page_list;
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             map_count;
#ifdef dmat_domain
        int             domain;
#endif
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static void *busdma_ih;

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
   "Total bounce pages");

static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);

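/*
 * Reserve the bounce pages a map needs before a load.  With BUS_DMA_NOWAIT
 * the reservation either succeeds completely or fails with ENOMEM;
 * otherwise the map is queued on the waiting list and EINPROGRESS is
 * returned so the load is retried from busdma_swi() once pages are freed.
 */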
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

        /* Reserve Necessary Bounce Pages */
        mtx_lock(&bounce_lock);
        if (flags & BUS_DMA_NOWAIT) {
                if (reserve_bounce_pages(dmat, map, 0) != 0) {
                        map->pagesneeded = 0;
                        mtx_unlock(&bounce_lock);
                        return (ENOMEM);
                }
        } else {
                if (reserve_bounce_pages(dmat, map, 1) != 0) {
                        /* Queue us for resources */
                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        mtx_unlock(&bounce_lock);
                        return (EINPROGRESS);
                }
        }
        mtx_unlock(&bounce_lock);

        return (0);
}

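/*
 * Initialize the global bounce page lists and their lock early in boot
 * (SI_SUB_LOCK), before any bounce zones can be created.
 */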
static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

        return (bz->sysctl_tree_top);
}

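/*
 * Find an existing bounce zone compatible with the tag's alignment and
 * lowaddr (and, where supported, its memory domain), or create a new one
 * along with its hw.busdma sysctl statistics nodes.  The chosen zone is
 * cached in dmat->bounce_zone.
 */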
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
                    dmat_domain(dmat) == bz->domain &&
#endif
                    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat_lowaddr(dmat);
        bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
        bz->map_count = 0;
#ifdef dmat_domain
        bz->domain = dmat_domain(dmat);
#endif
        snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
            (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests (pages bounced)");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "domain", CTLFLAG_RD, &bz->domain, 0,
            "memory domain");
#endif

        return (0);
}

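/*
 * Allocate up to 'numpages' bounce pages for the tag's bounce zone and
 * place them on the zone's free list.  Returns the number of pages that
 * were actually allocated.
 */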
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

#ifdef dmat_domain
                bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
                    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
                bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif

                if (bpage == NULL)
                        break;
#ifdef dmat_domain
                bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
                    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
                    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
                    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
                if (bpage->vaddr == 0) {
                        free(bpage, M_BUSDMA);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

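/*
 * Reserve free bounce pages from the map's zone, up to the number the map
 * still needs.  Called with the bounce lock held.  If 'commit' is zero and
 * the full request cannot be satisfied, nothing is reserved.  Returns the
 * number of pages the map is still short.
 */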
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

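/*
 * Assign one of the map's reserved bounce pages to cover 'size' bytes of
 * client data and record where that data lives so it can be copied during
 * bus_dmamap_sync().  Returns the bus address of the bounce page.  On
 * amd64/i386 a bounced region may span two physical pages.
 */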
#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
        KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef __riscv
        KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
            ("add_bounce_page: bad map %p", map));
#endif

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
                bpage->vaddr |= addr1 & PAGE_MASK;
                bpage->busaddr |= addr1 & PAGE_MASK;
                KASSERT(addr2 == 0,
            ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
                bpage->vaddr |= addr & PAGE_MASK;
                bpage->busaddr |= addr & PAGE_MASK;
#endif
        }
        bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
        bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
        KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
        bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
        bpage->dataoffs = addr1 & PAGE_MASK;
#else
        bpage->datapage = PHYS_TO_VM_PAGE(addr);
        bpage->dataoffs = addr & PAGE_MASK;
#endif
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

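/*
 * Return a bounce page to its zone's free list.  If a map is waiting for
 * pages and its reservation can now be completed, move it to the callback
 * list and schedule the busdma software interrupt to retry its load.
 */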
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;
        bool schedule_swi;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;
        if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * Reset the bounce page to start at offset 0.  Other uses
                 * of this bounce page may need to store a full page of
                 * data and/or assume it starts on a page boundary.
                 */
                bpage->vaddr &= ~PAGE_MASK;
                bpage->busaddr &= ~PAGE_MASK;
        }

        schedule_swi = false;
        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                            map, links);
                        bz->total_deferred++;
                        schedule_swi = true;
                }
        }
        mtx_unlock(&bounce_lock);
        if (schedule_swi)
                swi_sched(busdma_ih, 0);
}

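/*
 * Software interrupt handler run once bounce pages become available for a
 * deferred request; it replays each queued map's load with the tag's lock
 * function held around bus_dmamap_load_mem().
 */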
static void
busdma_swi(void *dummy __unused)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat), BUS_DMA_LOCK);
                bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
                    map->callback_arg, BUS_DMA_WAITOK);
                dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat), BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}

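/*
 * Create the software interrupt thread used to retry deferred loads.
 */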
static void
start_busdma_swi(void *dummy __unused)
{
        if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE,
            &busdma_ih))
                panic("died while creating busdma swi ithread");
}
SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi,
    NULL);