/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  As
 * this code currently assumes it can access internal members of
 * opaque types like bus_dma_tag_t and bus_dmamap it is #include'd in
 * backends rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   - dmat_alignment()
 *   - dmat_flags()
 *   - dmat_lowaddr()
 *   - dmat_lockfunc()
 *   - dmat_lockfuncarg()
 */
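
/*
 * Illustrative sketch only: a backend that #include's this file is
 * expected to supply accessor macros along these lines, mapped onto
 * whatever its private bus_dma_tag layout actually is.  The member
 * names below are hypothetical, not part of this file's contract:
 *
 *	#define dmat_alignment(dmat)	((dmat)->alignment)
 *	#define dmat_flags(dmat)	((dmat)->flags)
 *	#define dmat_lowaddr(dmat)	((dmat)->lowaddr)
 *	#define dmat_lockfunc(dmat)	((dmat)->lockfunc)
 *	#define dmat_lockfuncarg(dmat)	((dmat)->lockfuncarg)
 */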

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
#if defined(__amd64__) || defined(__i386__)
	vm_page_t	datapage[2];	/* physical page(s) of client data */
#else
	vm_page_t	datapage;	/* physical page of client data */
#endif
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
#ifdef dmat_domain
	int		domain;
#endif
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static void *busdma_ih;

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
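
/*
 * Try to reserve the bounce pages that a map's pending load needs.  With
 * BUS_DMA_NOWAIT the reservation must be satisfied in full immediately or
 * the load fails with ENOMEM; otherwise a partial reservation is kept, the
 * map is queued on the waiting list, and EINPROGRESS tells the caller that
 * the load will be completed later from the busdma software interrupt.
 */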
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
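
/*
 * The global bounce lock and lists are set up at SI_SUB_LOCK time so they
 * exist before any backend can create a bounce zone or defer a load.
 */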
static void
init_bounce_pages(void *dummy __unused)
{

	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}
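
/*
 * Find an existing bounce zone whose constraints are at least as strict
 * as the tag's (equal or larger alignment, equal or lower lowaddr bound,
 * same domain) and share it; otherwise create a new zone and publish its
 * statistics under the hw.busdma sysctl tree.
 */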
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
		    dmat_domain(dmat) == bz->domain &&
#endif
		    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat_lowaddr(dmat);
	bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
	bz->map_count = 0;
#ifdef dmat_domain
	bz->domain = dmat_domain(dmat);
#endif
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "domain", CTLFLAG_RD, &bz->domain, 0,
	    "memory domain");
#endif

	return (0);
}
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

#ifdef dmat_domain
		bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
		    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
		bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif
		if (bpage == NULL)
			break;
#ifdef dmat_domain
		bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
		    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
		    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
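
/*
 * Reserve free pages toward a map's outstanding request and return the
 * number of pages still lacking.  When commit is zero the reservation is
 * all-or-nothing: nothing is taken from the zone unless the request can
 * be satisfied in full.
 */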
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
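
/*
 * Take a bounce page off the zone's free list, bind it to the client
 * buffer described by vaddr and the physical address(es), and return the
 * bounce page's bus address for use in the DMA segment.  On amd64/i386 a
 * bounce page may cover a buffer that straddles two physical pages,
 * hence the two page arguments.
 */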
#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
	KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef __riscv
	KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
	    ("add_bounce_page: bad map %p", map));
#endif

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
		bpage->vaddr |= addr1 & PAGE_MASK;
		bpage->busaddr |= addr1 & PAGE_MASK;
		KASSERT(addr2 == 0,
	("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
#endif
	}
	bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
	bpage->dataoffs = addr1 & PAGE_MASK;
#else
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
#endif
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}
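
/*
 * Return a bounce page to its zone's free list.  If a map is waiting for
 * pages and the freed page completes its reservation, move the map to
 * the callback list and schedule the busdma swi to finish the deferred
 * load.
 */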
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;
	bool schedule_swi;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	schedule_swi = false;
	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			bz->total_deferred++;
			schedule_swi = true;
		}
	}
	mtx_unlock(&bounce_lock);
	if (schedule_swi)
		swi_sched(busdma_ih, 0);
}
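
/*
 * Software interrupt handler for deferred loads: replay each queued
 * map's bus_dmamap_load_mem() with BUS_DMA_WAITOK, invoking the client
 * callback under the tag's lock function.  The bounce lock is dropped
 * around each load, which may sleep.
 */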
static void
busdma_swi(void *dummy __unused)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat), BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat), BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}

static void
start_busdma_swi(void *dummy __unused)
{
	if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE,
	    &busdma_ih))
		panic("died while creating busdma swi ithread");
}
SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi,
    NULL);