/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  As
 * this code currently assumes it can access internal members of
 * opaque types like bus_dma_tag_t and bus_dmamap it is #include'd in
 * backends rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   dmat_alignment(), dmat_flags(), dmat_lowaddr(), dmat_lockfunc(),
 *   dmat_lockfuncarg() and, on NUMA-aware backends, dmat_domain()
 */
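
/*
 * For illustration only: a backend defines the accessors above to match
 * its own tag layout and then includes this file.  The definitions and
 * include path below are hypothetical sketches, not the literal lines
 * used by any particular backend.
 *
 *	#define dmat_alignment(t)	((t)->alignment)
 *	#define dmat_lowaddr(t)		((t)->lowaddr)
 *	#define dmat_flags(t)		((t)->flags)
 *	#define dmat_lockfunc(t)	((t)->lockfunc)
 *	#define dmat_lockfuncarg(t)	((t)->lockfuncarg)
 *
 *	#include "subr_busdma_bounce.c"
 */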

#include <sys/kthread.h>
#include <sys/sched.h>
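
/*
 * One bounce page: the wired low-memory buffer itself plus bookkeeping
 * for the client data it shadows while a request is in flight.
 */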
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
#if defined(__amd64__) || defined(__i386__)
	vm_page_t	datapage[2];	/* physical page(s) of client data */
#else
	vm_page_t	datapage;	/* physical page of client data */
#endif
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
#ifdef dmat_domain
	int		domain;
#endif
	sbintime_t	total_deferred_time;
	bus_addr_t	lowaddr;
	bus_size_t	alignment;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

static void busdma_thread(void *);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
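
/*
 * Reserve the bounce pages a map needs.  With BUS_DMA_NOWAIT the request
 * either succeeds in full or fails immediately; otherwise a partial
 * reservation parks the map on its zone's waiting list and EINPROGRESS
 * tells the caller the load will finish via a deferred callback.
 */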
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
	struct bounce_zone *bz;

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			bz = dmat->bounce_zone;
			STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
			    links);
			map->queued_time = sbinuptime();
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
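
/*
 * Sketch of the expected use from a backend's load path (hypothetical
 * caller, shown here only to illustrate the contract above):
 *
 *	if (map->pagesneeded != 0 &&
 *	    _bus_dmamap_reserve_pages(dmat, map, flags) == EINPROGRESS)
 *		return (EINPROGRESS);	deferred; callback fires later
 */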

static void
init_bounce_pages(void *dummy __unused)
{

	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}
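
/*
 * Find an existing bounce zone compatible with the tag's lowaddr and
 * alignment constraints (and memory domain, where supported), or create
 * a new one along with its sysctl nodes.  Creating the first zone also
 * starts the busdma thread that services deferred requests.
 */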
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	bool start_thread;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
		    dmat_domain(dmat) == bz->domain &&
#endif
		    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat_lowaddr(dmat);
	bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
#ifdef dmat_domain
	bz->domain = dmat_domain(dmat);
#endif
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	start_thread = STAILQ_EMPTY(&bounce_zone_list);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "domain", CTLFLAG_RD, &bz->domain, 0,
	    "memory domain");
#endif
	SYSCTL_ADD_SBINTIME_USEC(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred_time", CTLFLAG_RD, &bz->total_deferred_time,
	    "Cumulative time busdma requests are deferred (us)");

	if (start_thread) {
		if (kproc_create(busdma_thread, NULL, NULL, 0, 0, "busdma") !=
		    0)
			printf("failed to create busdma thread");
	}
	return (0);
}
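
/*
 * Grow a zone by up to numpages bounce pages, each a wired page allocated
 * below the zone's lowaddr bound.  Returns the number of pages actually
 * added, which may fall short on allocation failure.
 */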
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

#ifdef dmat_domain
		bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
		    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
		bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif
		if (bpage == NULL)
			break;
#ifdef dmat_domain
		bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
		    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
		    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
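
/*
 * Try to reserve free pages for a map.  When commit is zero the
 * reservation is all-or-nothing: nothing is taken unless the full
 * request can be met.  With commit set, whatever is available is taken.
 * Either way the remaining shortfall is returned, zero on success.
 */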
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
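
/*
 * Take a page off the zone's free list and record the client data it will
 * shadow; the returned bus address is what the DMA engine should target
 * instead of the client buffer.  On amd64/i386 a single bounced segment
 * may straddle two client pages, hence the addr1/addr2 pair.
 */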
#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
	KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef __riscv
	KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
	    ("add_bounce_page: bad map %p", map));
#endif

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
		bpage->vaddr |= addr1 & PAGE_MASK;
		bpage->busaddr |= addr1 & PAGE_MASK;
		KASSERT(addr2 == 0,
	("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
#endif
	}
	bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
	bpage->dataoffs = addr1 & PAGE_MASK;
#else
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
#endif
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}
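
/*
 * Return a map's bounce pages to their zone.  If enough pages are now
 * free to satisfy maps parked on the waiting list, move those maps to
 * the callback list and wake the busdma thread to run their callbacks.
 */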
static void
free_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;
	bool schedule_thread;
	u_int count;

	if (STAILQ_EMPTY(&map->bpages))
		return;

	bz = dmat->bounce_zone;
	count = 0;
	schedule_thread = false;
	STAILQ_FOREACH(bpage, &map->bpages, links) {
		bpage->datavaddr = 0;
		bpage->datacount = 0;

		if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
			/*
			 * Reset the bounce page to start at offset 0.
			 * Other uses of this bounce page may need to
			 * store a full page of data and/or assume it
			 * starts on a page boundary.
			 */
			bpage->vaddr &= ~PAGE_MASK;
			bpage->busaddr &= ~PAGE_MASK;
		}
		count++;
	}

	mtx_lock(&bounce_lock);
	STAILQ_CONCAT(&bz->bounce_page_list, &map->bpages);
	bz->free_bpages += count;
	bz->active_bpages -= count;
	while ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) != 0)
			break;

		STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
		STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
		bz->total_deferred++;
		schedule_thread = true;
	}
	mtx_unlock(&bounce_lock);
	if (schedule_thread)
		wakeup(&bounce_map_callbacklist);
}
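
/*
 * Kernel thread that services deferred requests: it sleeps until
 * free_bounce_pages() queues maps on the callback list, then replays
 * each load with the tag's lock function held around the callback and
 * accounts the time the request spent deferred.
 */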
static void
busdma_thread(void *dummy __unused)
{
	STAILQ_HEAD(, bus_dmamap) callbacklist;
	bus_dma_tag_t dmat;
	struct bus_dmamap *map, *nmap;
	struct bounce_zone *bz;

	thread_lock(curthread);
	sched_class(curthread, PRI_ITHD);
	sched_ithread_prio(curthread, PI_SWI(SWI_BUSDMA));
	thread_unlock(curthread);
	for (;;) {
		mtx_lock(&bounce_lock);
		while (STAILQ_EMPTY(&bounce_map_callbacklist))
			mtx_sleep(&bounce_map_callbacklist, &bounce_lock, 0,
			    "busdma", 0);
		STAILQ_INIT(&callbacklist);
		STAILQ_CONCAT(&callbacklist, &bounce_map_callbacklist);
		mtx_unlock(&bounce_lock);

		STAILQ_FOREACH_SAFE(map, &callbacklist, links, nmap) {
			dmat = map->dmat;
			bz = dmat->bounce_zone;
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_LOCK);
			bz->total_deferred_time += (sbinuptime() - map->queued_time);
			bus_dmamap_load_mem(map->dmat, map, &map->mem,
			    map->callback, map->callback_arg, BUS_DMA_WAITOK);
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_UNLOCK);
		}
	}
}