/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  As
 * this code currently assumes it can access internal members of
 * opaque types like bus_dma_tag_t and bus_dmamap it is #include'd in
 * backends rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   - dmat_alignment()
 *   - dmat_flags()
 *   - dmat_highaddr()
 *   - dmat_lowaddr()
 *   - dmat_lockfunc()
 *   - dmat_lockfuncarg()
 * - optionally, dmat_domain() to enable NUMA domain-aware zones
 */
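
/*
 * For illustration only (not part of the original file): a backend
 * whose tag begins with a common header could provide the accessor
 * macros along the following lines.  "struct bus_dma_tag_common" and
 * its field names are assumptions for this sketch; each backend
 * supplies whatever matches its own tag layout, then #include's this
 * file after defining struct bus_dmamap.
 *
 *	#define dmat_alignment(dmat)	((dmat)->common.alignment)
 *	#define dmat_flags(dmat)	((dmat)->common.flags)
 *	#define dmat_highaddr(dmat)	((dmat)->common.highaddr)
 *	#define dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
 *	#define dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
 *	#define dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)
 */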

#include <sys/kthread.h>
#include <sys/sched.h>

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
#if defined(__amd64__) || defined(__i386__)
	vm_page_t	datapage[2];	/* physical page(s) of client data */
#else
	vm_page_t	datapage;	/* physical page of client data */
#endif
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
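
/*
 * A bounce zone is a pool of bounce pages that all satisfy the same
 * lowaddr and alignment constraints; tags with compatible constraints
 * share a zone (see alloc_bounce_zone() below).
 */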
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
#ifdef dmat_domain
	int		domain;
#endif
	sbintime_t	total_deferred_time;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

static void busdma_thread(void *);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
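
/*
 * Reserve bounce pages for a map.  Returns 0 once all of the map's
 * pagesneeded have been reserved, ENOMEM if BUS_DMA_NOWAIT was passed
 * and too few pages are free, or EINPROGRESS after queueing the map on
 * the zone's waiting list so the reservation completes as other maps
 * release their pages.
 */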
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
	struct bounce_zone *bz;

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			bz = dmat->bounce_zone;
			STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
			    links);
			map->queued_time = sbinuptime();
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

static void
init_bounce_pages(void *dummy __unused)
{

	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

/*
 * Returns true if the address falls within the tag's exclusion window
 * or fails to meet its alignment requirements.
 */
static bool
addr_needs_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
{

	if (paddr > dmat_lowaddr(dmat) && paddr <= dmat_highaddr(dmat))
		return (true);
	if (!vm_addr_align_ok(paddr, dmat_alignment(dmat)))
		return (true);

	return (false);
}
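
/*
 * Find an existing bounce zone that satisfies this tag's lowaddr,
 * alignment, and (where supported) domain constraints, or create and
 * register a new zone, along with its sysctl stats, attaching it to
 * the tag in either case.
 */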
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	bool start_thread;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
		    dmat_domain(dmat) == bz->domain &&
#endif
		    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat_lowaddr(dmat);
	bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
	bz->map_count = 0;
#ifdef dmat_domain
	bz->domain = dmat_domain(dmat);
#endif
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	start_thread = STAILQ_EMPTY(&bounce_zone_list);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "domain", CTLFLAG_RD, &bz->domain, 0,
	    "memory domain");
#endif
	SYSCTL_ADD_SBINTIME_USEC(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred_time", CTLFLAG_RD, &bz->total_deferred_time,
	    "Cumulative time busdma requests are deferred (us)");

	if (start_thread) {
		if (kproc_create(busdma_thread, NULL, NULL, 0, 0, "busdma") !=
		    0)
			printf("failed to create busdma thread");
	}
	return (0);
}
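
/*
 * Add up to numpages new bounce pages to the tag's zone, allocating
 * each page below the zone's lowaddr cutoff.  Returns the number of
 * pages actually allocated; allocation stops early if memory cannot
 * be obtained with M_NOWAIT.
 */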
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

#ifdef dmat_domain
		bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
		    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
		bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif
		if (bpage == NULL)
			break;
#ifdef dmat_domain
		bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
		    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
		    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
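
/*
 * Reserve free pages from the map's zone, up to the map's remaining
 * need.  When commit is zero the reservation is all-or-nothing; when
 * nonzero, whatever is available is taken.  Returns the number of
 * pages still outstanding.  The bounce lock must be held.
 */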
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
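
/*
 * Consume one reserved bounce page to cover the client data at vaddr
 * and return the bus address at which the bounced segment should be
 * programmed.  On amd64/i386 a single bounce page may cover client
 * data spanning two physical pages, so both pages are recorded.  A
 * backend's segment loop might use it roughly as follows (sketch with
 * hypothetical variable names, non-x86 signature):
 *
 *	if (map->pagesneeded != 0 && addr_needs_bounce(dmat, curaddr))
 *		curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
 *		    sgsize);
 */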
#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
	KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef DMAMAP_COULD_BOUNCE
	KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
	    ("add_bounce_page: bad map %p", map));
#endif

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
		bpage->vaddr |= addr1 & PAGE_MASK;
		bpage->busaddr |= addr1 & PAGE_MASK;
		KASSERT(addr2 == 0,
	    ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
#endif
	}
	bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
	bpage->dataoffs = addr1 & PAGE_MASK;
#else
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
#endif
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}
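
/*
 * Return a map's bounce pages to its zone's free list, then use the
 * newly freed pages to satisfy maps waiting on deferred reservations,
 * handing completed reservations to the busdma thread for their load
 * callbacks.
 */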
static void
free_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;
	bool schedule_thread;
	u_int count;

	if (STAILQ_EMPTY(&map->bpages))
		return;

	bz = dmat->bounce_zone;
	count = 0;
	schedule_thread = false;
	STAILQ_FOREACH(bpage, &map->bpages, links) {
		bpage->datavaddr = 0;
		bpage->datacount = 0;

		if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
			/*
			 * Reset the bounce page to start at offset 0.
			 * Other uses of this bounce page may need to
			 * store a full page of data and/or assume it
			 * starts on a page boundary.
			 */
			bpage->vaddr &= ~PAGE_MASK;
			bpage->busaddr &= ~PAGE_MASK;
		}
		count++;
	}

	mtx_lock(&bounce_lock);
	STAILQ_CONCAT(&bz->bounce_page_list, &map->bpages);
	bz->free_bpages += count;
	bz->active_bpages -= count;
	while ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) != 0)
			break;

		STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
		STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
		bz->total_deferred++;
		schedule_thread = true;
	}
	mtx_unlock(&bounce_lock);
	if (schedule_thread)
		wakeup(&bounce_map_callbacklist);
}
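
/*
 * Kernel thread that runs deferred load callbacks.  It drains the
 * global callback list and, for each map, takes the tag's lock,
 * charges the time the map spent queued to the zone's deferred-time
 * counter, and reloads the map, invoking the client callback.
 */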
static void
busdma_thread(void *dummy __unused)
{
	STAILQ_HEAD(, bus_dmamap) callbacklist;
	bus_dma_tag_t dmat;
	struct bus_dmamap *map, *nmap;
	struct bounce_zone *bz;

	thread_lock(curthread);
	sched_class(curthread, PRI_ITHD);
	sched_ithread_prio(curthread, PI_SWI(SWI_BUSDMA));
	thread_unlock(curthread);
	for (;;) {
		mtx_lock(&bounce_lock);
		while (STAILQ_EMPTY(&bounce_map_callbacklist))
			mtx_sleep(&bounce_map_callbacklist, &bounce_lock, 0,
			    "-", 0);
		STAILQ_INIT(&callbacklist);
		STAILQ_CONCAT(&callbacklist, &bounce_map_callbacklist);
		mtx_unlock(&bounce_lock);

		STAILQ_FOREACH_SAFE(map, &callbacklist, links, nmap) {
			dmat = map->dmat;
			bz = dmat->bounce_zone;
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_LOCK);
			bz->total_deferred_time += (sbinuptime() -
			    map->queued_time);
			bus_dmamap_load_mem(map->dmat, map, &map->mem,
			    map->callback, map->callback_arg, BUS_DMA_WAITOK);
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_UNLOCK);
		}
	}
}