sys/x86/x86/busdma_bounce.c
1 /*-
2  * Copyright (c) 1997, 1998 Justin T. Gibbs.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. The name of the author may not be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
33 #include <sys/bus.h>
34 #include <sys/interrupt.h>
35 #include <sys/kernel.h>
36 #include <sys/ktr.h>
37 #include <sys/lock.h>
38 #include <sys/proc.h>
39 #include <sys/memdesc.h>
40 #include <sys/mutex.h>
41 #include <sys/sysctl.h>
42 #include <sys/uio.h>
43
44 #include <vm/vm.h>
45 #include <vm/vm_extern.h>
46 #include <vm/vm_kern.h>
47 #include <vm/vm_page.h>
48 #include <vm/vm_map.h>
49
50 #include <machine/atomic.h>
51 #include <machine/bus.h>
52 #include <machine/md_var.h>
53 #include <machine/specialreg.h>
54 #include <x86/include/busdma_impl.h>
55
56 #ifdef __i386__
57 #define MAX_BPAGES 512
58 #else
59 #define MAX_BPAGES 8192
60 #endif
61
62 enum {
63         BUS_DMA_COULD_BOUNCE    = 0x01,
64         BUS_DMA_MIN_ALLOC_COMP  = 0x02,
65         BUS_DMA_KMEM_ALLOC      = 0x04,
66 };
67
68 struct bounce_zone;
69
70 struct bus_dma_tag {
71         struct bus_dma_tag_common common;
72         int                     map_count;
73         int                     bounce_flags;
74         bus_dma_segment_t       *segments;
75         struct bounce_zone      *bounce_zone;
76 };
77
78 struct bounce_page {
79         vm_offset_t     vaddr;          /* kva of bounce buffer */
80         bus_addr_t      busaddr;        /* Physical address */
81         vm_offset_t     datavaddr;      /* kva of client data */
82         vm_offset_t     dataoffs;       /* page offset of client data */
83         vm_page_t       datapage[2];    /* physical page(s) of client data */
84         bus_size_t      datacount;      /* client data count */
85         STAILQ_ENTRY(bounce_page) links;
86 };
87
88 int busdma_swi_pending;
89
90 struct bounce_zone {
91         STAILQ_ENTRY(bounce_zone) links;
92         STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
93         int             total_bpages;
94         int             free_bpages;
95         int             reserved_bpages;
96         int             active_bpages;
97         int             total_bounced;
98         int             total_deferred;
99         int             map_count;
100         bus_size_t      alignment;
101         bus_addr_t      lowaddr;
102         char            zoneid[8];
103         char            lowaddrid[20];
104         struct sysctl_ctx_list sysctl_tree;
105         struct sysctl_oid *sysctl_tree_top;
106 };
107
108 static struct mtx bounce_lock;
109 static int total_bpages;
110 static int busdma_zonecount;
111 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
112
113 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
114 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
115            "Total bounce pages");
116
117 struct bus_dmamap {
118         struct bp_list         bpages;
119         int                    pagesneeded;
120         int                    pagesreserved;
121         bus_dma_tag_t          dmat;
122         struct memdesc         mem;
123         bus_dmamap_callback_t *callback;
124         void                  *callback_arg;
125         STAILQ_ENTRY(bus_dmamap) links;
126 };
127
128 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
129 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
130 static struct bus_dmamap nobounce_dmamap;
131
132 static void init_bounce_pages(void *dummy);
133 static int alloc_bounce_zone(bus_dma_tag_t dmat);
134 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
135 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
136                                 int commit);
137 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
138                                   vm_offset_t vaddr, bus_addr_t addr1,
139                                   bus_addr_t addr2, bus_size_t size);
140 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
141 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
142 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
143                                     pmap_t pmap, void *buf, bus_size_t buflen,
144                                     int flags);
145 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
146                                    vm_paddr_t buf, bus_size_t buflen,
147                                    int flags);
148 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
149                                      int flags);
150
151 /*
152  * Allocate a device-specific dma_tag.
153  */
154 static int
155 bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
156     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
157     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
158     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
159     void *lockfuncarg, bus_dma_tag_t *dmat)
160 {
161         bus_dma_tag_t newtag;
162         int error;
163
164         *dmat = NULL;
165         error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
166             NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
167             maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
168             sizeof (struct bus_dma_tag), (void **)&newtag);
169         if (error != 0)
170                 return (error);
171
172         newtag->common.impl = &bus_dma_bounce_impl;
173         newtag->map_count = 0;
174         newtag->segments = NULL;
175
176         if (parent != NULL && ((newtag->common.filter != NULL) ||
177             ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)))
178                 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
179
180         if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
181             newtag->common.alignment > 1)
182                 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
183
184         if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
185             (flags & BUS_DMA_ALLOCNOW) != 0) {
186                 struct bounce_zone *bz;
187
188                 /* Must bounce */
189                 if ((error = alloc_bounce_zone(newtag)) != 0) {
190                         free(newtag, M_DEVBUF);
191                         return (error);
192                 }
193                 bz = newtag->bounce_zone;
194
195                 if (ptoa(bz->total_bpages) < maxsize) {
196                         int pages;
197
198                         pages = atop(maxsize) - bz->total_bpages;
199
200                         /* Add pages to our bounce pool */
201                         if (alloc_bounce_pages(newtag, pages) < pages)
202                                 error = ENOMEM;
203                 }
204                 /* Performed initial allocation */
205                 newtag->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
206         } else
207                 error = 0;
208
209         if (error != 0)
210                 free(newtag, M_DEVBUF);
211         else
212                 *dmat = newtag;
213         CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
214             __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
215             error);
216         return (error);
217 }
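
/*
 * Illustrative sketch, not part of this file: drivers normally reach the
 * routine above through the bus_dma_tag_create(9) front end.  The softc
 * field "sc->parent_dmat", the device_t "dev" and the sizing values are
 * hypothetical, chosen only to show the argument order; a lowaddr below
 * ptoa(Maxmem), as with the 32-bit limit here, is what typically causes
 * BUS_DMA_COULD_BOUNCE to be set above.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	(parent tag from the parent bus)
 *	    1, 0,			(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr: device does 32-bit DMA)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(filter, filterarg)
 *	    DFLTPHYS, 1, DFLTPHYS,	(maxsize, nsegments, maxsegsz)
 *	    0,				(flags)
 *	    NULL, NULL,			(lockfunc, lockfuncarg)
 *	    &sc->parent_dmat);
 */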
218
219 static int
220 bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
221 {
222         bus_dma_tag_t dmat_copy, parent;
223         int error;
224
225         error = 0;
226         dmat_copy = dmat;
227
228         if (dmat != NULL) {
229                 if (dmat->map_count != 0) {
230                         error = EBUSY;
231                         goto out;
232                 }
233                 while (dmat != NULL) {
234                         parent = (bus_dma_tag_t)dmat->common.parent;
235                         atomic_subtract_int(&dmat->common.ref_count, 1);
236                         if (dmat->common.ref_count == 0) {
237                                 if (dmat->segments != NULL)
238                                         free(dmat->segments, M_DEVBUF);
239                                 free(dmat, M_DEVBUF);
240                                 /*
241                                  * This was the last reference, so
242                                  * release our reference on the
243                                  * parent tag as well.
244                                  */
245                                 dmat = parent;
246                         } else
247                                 dmat = NULL;
248                 }
249         }
250 out:
251         CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
252         return (error);
253 }
254
255 /*
256  * Allocate a handle for mapping from kva/uva/physical
257  * address space into bus device space.
258  */
259 static int
260 bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
261 {
262         struct bounce_zone *bz;
263         int error, maxpages, pages;
264
265         error = 0;
266
267         if (dmat->segments == NULL) {
268                 dmat->segments = (bus_dma_segment_t *)malloc(
269                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
270                     M_DEVBUF, M_NOWAIT);
271                 if (dmat->segments == NULL) {
272                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
273                             __func__, dmat, ENOMEM);
274                         return (ENOMEM);
275                 }
276         }
277
278         /*
279          * Bouncing might be required if the driver asks for an active
280          * exclusion region, a data alignment that is stricter than 1, and/or
281          * an active address boundary.
282          */
283         if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) {
284                 /* Must bounce */
285                 if (dmat->bounce_zone == NULL) {
286                         if ((error = alloc_bounce_zone(dmat)) != 0)
287                                 return (error);
288                 }
289                 bz = dmat->bounce_zone;
290
291                 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
292                     M_NOWAIT | M_ZERO);
293                 if (*mapp == NULL) {
294                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
295                             __func__, dmat, ENOMEM);
296                         return (ENOMEM);
297                 }
298
299                 /* Initialize the new map */
300                 STAILQ_INIT(&((*mapp)->bpages));
301
302                 /*
303                  * Attempt to add pages to our pool on a per-instance
304                  * basis up to a sane limit.
305                  */
306                 if (dmat->common.alignment > 1)
307                         maxpages = MAX_BPAGES;
308                 else
309                         maxpages = MIN(MAX_BPAGES, Maxmem -
310                             atop(dmat->common.lowaddr));
311                 if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
312                     (bz->map_count > 0 && bz->total_bpages < maxpages)) {
313                         pages = MAX(atop(dmat->common.maxsize), 1);
314                         pages = MIN(maxpages - bz->total_bpages, pages);
315                         pages = MAX(pages, 1);
316                         if (alloc_bounce_pages(dmat, pages) < pages)
317                                 error = ENOMEM;
318                         if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
319                             == 0) {
320                                 if (error == 0) {
321                                         dmat->bounce_flags |=
322                                             BUS_DMA_MIN_ALLOC_COMP;
323                                 }
324                         } else
325                                 error = 0;
326                 }
327                 bz->map_count++;
328         } else {
329                 *mapp = NULL;
330         }
331         if (error == 0)
332                 dmat->map_count++;
333         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
334             __func__, dmat, dmat->common.flags, error);
335         return (error);
336 }
337
338 /*
339  * Destroy a handle for mapping from kva/uva/physical
340  * address space into bus device space.
341  */
342 static int
343 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
344 {
345
346         if (map != NULL && map != &nobounce_dmamap) {
347                 if (STAILQ_FIRST(&map->bpages) != NULL) {
348                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
349                             __func__, dmat, EBUSY);
350                         return (EBUSY);
351                 }
352                 if (dmat->bounce_zone)
353                         dmat->bounce_zone->map_count--;
354                 free(map, M_DEVBUF);
355         }
356         dmat->map_count--;
357         CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
358         return (0);
359 }
360
361
362 /*
363  * Allocate a piece of memory that can be efficiently mapped into
364  * bus device space based on the constraints listed in the dma tag.
365  * A dmamap for use with dmamap_load is also allocated.
366  */
367 static int
368 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
369     bus_dmamap_t *mapp)
370 {
371         vm_memattr_t attr;
372         int mflags;
373
374         if (flags & BUS_DMA_NOWAIT)
375                 mflags = M_NOWAIT;
376         else
377                 mflags = M_WAITOK;
378
379         /* If we succeed, no mapping/bouncing will be required */
380         *mapp = NULL;
381
382         if (dmat->segments == NULL) {
383                 dmat->segments = (bus_dma_segment_t *)malloc(
384                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
385                     M_DEVBUF, mflags);
386                 if (dmat->segments == NULL) {
387                         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
388                             __func__, dmat, dmat->common.flags, ENOMEM);
389                         return (ENOMEM);
390                 }
391         }
392         if (flags & BUS_DMA_ZERO)
393                 mflags |= M_ZERO;
394         if (flags & BUS_DMA_NOCACHE)
395                 attr = VM_MEMATTR_UNCACHEABLE;
396         else
397                 attr = VM_MEMATTR_DEFAULT;
398
399         /* 
400          * XXX:
401          * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
402          * alignment guarantees of malloc need to be nailed down, and the
403          * code below should be rewritten to take that into account.
404          *
405          * In the meantime, we'll warn the user if malloc gets it wrong.
406          */
407         if ((dmat->common.maxsize <= PAGE_SIZE) &&
408            (dmat->common.alignment <= dmat->common.maxsize) &&
409             dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
410             attr == VM_MEMATTR_DEFAULT) {
411                 *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
412         } else if (dmat->common.nsegments >= btoc(dmat->common.maxsize) &&
413             dmat->common.alignment <= PAGE_SIZE &&
414             (dmat->common.boundary == 0 ||
415             dmat->common.boundary >= dmat->common.lowaddr)) {
416                 /* Page-based multi-segment allocations allowed */
417                 *vaddr = (void *)kmem_alloc_attr(kernel_arena,
418                     dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
419                     attr);
420                 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
421         } else {
422                 *vaddr = (void *)kmem_alloc_contig(kernel_arena,
423                     dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
424                     dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
425                     dmat->common.boundary, attr);
426                 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
427         }
428         if (*vaddr == NULL) {
429                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
430                     __func__, dmat, dmat->common.flags, ENOMEM);
431                 return (ENOMEM);
432         } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
433                 printf("bus_dmamem_alloc failed to align memory properly.\n");
434         }
435         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
436             __func__, dmat, dmat->common.flags, 0);
437         return (0);
438 }
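
/*
 * Illustrative sketch with hypothetical names: the allocator above is
 * normally paired with a subsequent load, e.g. for a descriptor ring.
 * Memory that already satisfies the tag's constraints needs no bouncing,
 * which is why *mapp is returned as NULL here and the load below takes
 * the fast path.
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_dmat, sc->ring_map, sc->ring,
 *		    RING_SIZE, foo_ring_cb, sc, BUS_DMA_NOWAIT);
 */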
439
440 /*
441  * Free a piece of memory and its associated dmamap that were allocated
442  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
443  */
444 static void
445 bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
446 {
447         /*
448          * dmamem does not need to be bounced, so the map should be
449          * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
450          * was used and set if kmem_alloc_contig() was used.
451          */
452         if (map != NULL)
453                 panic("bus_dmamem_free: Invalid map freed\n");
454         if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
455                 free(vaddr, M_DEVBUF);
456         else
457                 kmem_free(kernel_arena, (vm_offset_t)vaddr,
458                     dmat->common.maxsize);
459         CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
460             dmat->bounce_flags);
461 }
462
463 static void
464 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
465     bus_size_t buflen, int flags)
466 {
467         bus_addr_t curaddr;
468         bus_size_t sgsize;
469
470         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
471                 /*
472                  * Count the number of bounce pages
473                  * needed in order to complete this transfer
474                  */
475                 curaddr = buf;
476                 while (buflen != 0) {
477                         sgsize = MIN(buflen, dmat->common.maxsegsz);
478                         if (bus_dma_run_filter(&dmat->common, curaddr)) {
479                                 sgsize = MIN(sgsize,
480                                     PAGE_SIZE - (curaddr & PAGE_MASK));
481                                 map->pagesneeded++;
482                         }
483                         curaddr += sgsize;
484                         buflen -= sgsize;
485                 }
486                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
487         }
488 }
489
490 static void
491 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
492     void *buf, bus_size_t buflen, int flags)
493 {
494         vm_offset_t vaddr;
495         vm_offset_t vendaddr;
496         bus_addr_t paddr;
497         bus_size_t sg_len;
498
499         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
500                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
501                     "alignment= %d", dmat->common.lowaddr,
502                     ptoa((vm_paddr_t)Maxmem),
503                     dmat->common.boundary, dmat->common.alignment);
504                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
505                     map, &nobounce_dmamap, map->pagesneeded);
506                 /*
507                  * Count the number of bounce pages
508                  * needed in order to complete this transfer
509                  */
510                 vaddr = (vm_offset_t)buf;
511                 vendaddr = (vm_offset_t)buf + buflen;
512
513                 while (vaddr < vendaddr) {
514                         sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
515                         if (pmap == kernel_pmap)
516                                 paddr = pmap_kextract(vaddr);
517                         else
518                                 paddr = pmap_extract(pmap, vaddr);
519                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
520                                 sg_len = roundup2(sg_len,
521                                     dmat->common.alignment);
522                                 map->pagesneeded++;
523                         }
524                         vaddr += sg_len;
525                 }
526                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
527         }
528 }
529
530 static void
531 _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
532     int ma_offs, bus_size_t buflen, int flags)
533 {
534         bus_size_t sg_len, max_sgsize;
535         int page_index;
536         vm_paddr_t paddr;
537
538         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
539                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
540                     "alignment= %d", dmat->common.lowaddr,
541                     ptoa((vm_paddr_t)Maxmem),
542                     dmat->common.boundary, dmat->common.alignment);
543                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
544                     map, &nobounce_dmamap, map->pagesneeded);
545
546                 /*
547                  * Count the number of bounce pages
548                  * needed in order to complete this transfer
549                  */
550                 page_index = 0;
551                 while (buflen > 0) {
552                         paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
553                         sg_len = PAGE_SIZE - ma_offs;
554                         max_sgsize = MIN(buflen, dmat->common.maxsegsz);
555                         sg_len = MIN(sg_len, max_sgsize);
556                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
557                                 sg_len = roundup2(sg_len,
558                                     dmat->common.alignment);
559                                 sg_len = MIN(sg_len, max_sgsize);
560                                 KASSERT((sg_len & (dmat->common.alignment - 1))
561                                     == 0, ("Segment size is not aligned"));
562                                 map->pagesneeded++;
563                         }
564                         if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
565                                 page_index++;
566                         ma_offs = (ma_offs + sg_len) & PAGE_MASK;
567                         KASSERT(buflen >= sg_len,
568                             ("Segment length overruns original buffer"));
569                         buflen -= sg_len;
570                 }
571                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
572         }
573 }
574
575 static int
576 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
577 {
578
579         /* Reserve Necessary Bounce Pages */
580         mtx_lock(&bounce_lock);
581         if (flags & BUS_DMA_NOWAIT) {
582                 if (reserve_bounce_pages(dmat, map, 0) != 0) {
583                         mtx_unlock(&bounce_lock);
584                         return (ENOMEM);
585                 }
586         } else {
587                 if (reserve_bounce_pages(dmat, map, 1) != 0) {
588                         /* Queue us for resources */
589                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
590                         mtx_unlock(&bounce_lock);
591                         return (EINPROGRESS);
592                 }
593         }
594         mtx_unlock(&bounce_lock);
595
596         return (0);
597 }
598
599 /*
600  * Add a single contiguous physical range to the segment list.
601  */
602 static int
603 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
604     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
605 {
606         bus_addr_t baddr, bmask;
607         int seg;
608
609         /*
610          * Make sure we don't cross any boundaries.
611          */
612         bmask = ~(dmat->common.boundary - 1);
613         if (dmat->common.boundary > 0) {
614                 baddr = (curaddr + dmat->common.boundary) & bmask;
615                 if (sgsize > (baddr - curaddr))
616                         sgsize = (baddr - curaddr);
617         }
618
619         /*
620          * Insert chunk into a segment, coalescing with
621          * previous segment if possible.
622          */
623         seg = *segp;
624         if (seg == -1) {
625                 seg = 0;
626                 segs[seg].ds_addr = curaddr;
627                 segs[seg].ds_len = sgsize;
628         } else {
629                 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
630                     (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
631                     (dmat->common.boundary == 0 ||
632                      (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
633                         segs[seg].ds_len += sgsize;
634                 else {
635                         if (++seg >= dmat->common.nsegments)
636                                 return (0);
637                         segs[seg].ds_addr = curaddr;
638                         segs[seg].ds_len = sgsize;
639                 }
640         }
641         *segp = seg;
642         return (sgsize);
643 }
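
/*
 * Worked example of the boundary clamp above (illustrative values): with
 * boundary = 0x1000, curaddr = 0x1f80 and sgsize = 0x200, bmask is ~0xfff,
 * baddr = (0x1f80 + 0x1000) & bmask = 0x2000, and sgsize is trimmed to
 * baddr - curaddr = 0x80.  The caller's loop emits the remaining 0x180
 * bytes as a new segment starting at 0x2000, so no segment crosses the
 * 4KB boundary.
 */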
644
645 /*
646  * Utility function to load a physical buffer.  segp contains
647  * the starting segment on entrance, and the ending segment on exit.
648  */
649 static int
650 bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
651     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
652     int *segp)
653 {
654         bus_size_t sgsize;
655         bus_addr_t curaddr;
656         int error;
657
658         if (map == NULL)
659                 map = &nobounce_dmamap;
660
661         if (segs == NULL)
662                 segs = dmat->segments;
663
664         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
665                 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
666                 if (map->pagesneeded != 0) {
667                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
668                         if (error)
669                                 return (error);
670                 }
671         }
672
673         while (buflen > 0) {
674                 curaddr = buf;
675                 sgsize = MIN(buflen, dmat->common.maxsegsz);
676                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
677                     map->pagesneeded != 0 &&
678                     bus_dma_run_filter(&dmat->common, curaddr)) {
679                         sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
680                         curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
681                             sgsize);
682                 }
683                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
684                     segp);
685                 if (sgsize == 0)
686                         break;
687                 buf += sgsize;
688                 buflen -= sgsize;
689         }
690
691         /*
692          * Did we fit?
693          */
694         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
695 }
696
697 /*
698  * Utility function to load a linear buffer.  segp contains
699  * the starting segment on entrance, and the ending segment on exit.
700  */
701 static int
702 bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
703     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
704     int *segp)
705 {
706         bus_size_t sgsize, max_sgsize;
707         bus_addr_t curaddr;
708         vm_offset_t kvaddr, vaddr;
709         int error;
710
711         if (map == NULL)
712                 map = &nobounce_dmamap;
713
714         if (segs == NULL)
715                 segs = dmat->segments;
716
717         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
718                 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
719                 if (map->pagesneeded != 0) {
720                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
721                         if (error)
722                                 return (error);
723                 }
724         }
725
726         vaddr = (vm_offset_t)buf;
727         while (buflen > 0) {
728                 /*
729                  * Get the physical address for this segment.
730                  */
731                 if (pmap == kernel_pmap) {
732                         curaddr = pmap_kextract(vaddr);
733                         kvaddr = vaddr;
734                 } else {
735                         curaddr = pmap_extract(pmap, vaddr);
736                         kvaddr = 0;
737                 }
738
739                 /*
740                  * Compute the segment size, and adjust counts.
741                  */
742                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
743                 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
744                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
745                     map->pagesneeded != 0 &&
746                     bus_dma_run_filter(&dmat->common, curaddr)) {
747                         sgsize = roundup2(sgsize, dmat->common.alignment);
748                         sgsize = MIN(sgsize, max_sgsize);
749                         curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
750                             sgsize);
751                 } else {
752                         sgsize = MIN(sgsize, max_sgsize);
753                 }
754                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
755                     segp);
756                 if (sgsize == 0)
757                         break;
758                 vaddr += sgsize;
759                 buflen -= sgsize;
760         }
761
762         /*
763          * Did we fit?
764          */
765         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
766 }
767
768 static int
769 bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
770     struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
771     bus_dma_segment_t *segs, int *segp)
772 {
773         vm_paddr_t paddr, next_paddr;
774         int error, page_index;
775         bus_size_t sgsize, max_sgsize;
776
777         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
778                 /*
779                  * If we have to keep the offset of each page this function
780                  * is not suitable, switch back to bus_dmamap_load_ma_triv
781                  * which is going to do the right thing in this case.
782                  */
783                 error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
784                     flags, segs, segp);
785                 return (error);
786         }
787
788         if (map == NULL)
789                 map = &nobounce_dmamap;
790
791         if (segs == NULL)
792                 segs = dmat->segments;
793
794         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
795                 _bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
796                 if (map->pagesneeded != 0) {
797                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
798                         if (error)
799                                 return (error);
800                 }
801         }
802
803         page_index = 0;
804         while (buflen > 0) {
805                 /*
806                  * Compute the segment size, and adjust counts.
807                  */
808                 paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
809                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
810                 sgsize = PAGE_SIZE - ma_offs;
811                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
812                     map->pagesneeded != 0 &&
813                     bus_dma_run_filter(&dmat->common, paddr)) {
814                         sgsize = roundup2(sgsize, dmat->common.alignment);
815                         sgsize = MIN(sgsize, max_sgsize);
816                         KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
817                             ("Segment size is not aligned"));
818                         /*
819                          * Check if two pages of the user provided buffer
820                          * are used.
821                          */
822                         if ((ma_offs + sgsize) > PAGE_SIZE)
823                                 next_paddr =
824                                     VM_PAGE_TO_PHYS(ma[page_index + 1]);
825                         else
826                                 next_paddr = 0;
827                         paddr = add_bounce_page(dmat, map, 0, paddr,
828                             next_paddr, sgsize);
829                 } else {
830                         sgsize = MIN(sgsize, max_sgsize);
831                 }
832                 sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
833                     segp);
834                 if (sgsize == 0)
835                         break;
836                 KASSERT(buflen >= sgsize,
837                     ("Segment length overruns original buffer"));
838                 buflen -= sgsize;
839                 if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
840                         page_index++;
841                 ma_offs = (ma_offs + sgsize) & PAGE_MASK;
842         }
843
844         /*
845          * Did we fit?
846          */
847         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
848 }
849
850 static void
851 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
852     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
853 {
854
855         if (map == NULL)
856                 return;
857         map->mem = *mem;
858         map->dmat = dmat;
859         map->callback = callback;
860         map->callback_arg = callback_arg;
861 }
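
/*
 * Driver-side sketch of the deferred-load contract recorded above (the
 * names are hypothetical): a load issued with BUS_DMA_WAITOK may return
 * EINPROGRESS, and the callback saved in the map is then invoked from
 * busdma_swi() once enough bounce pages have been freed.
 *
 *	static void
 *	foo_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->ring_busaddr = segs[0].ds_addr;
 *	}
 */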
862
863 static bus_dma_segment_t *
864 bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
865     bus_dma_segment_t *segs, int nsegs, int error)
866 {
867
868         if (segs == NULL)
869                 segs = dmat->segments;
870         return (segs);
871 }
872
873 /*
874  * Release the mapping held by map.
875  */
876 static void
877 bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
878 {
879         struct bounce_page *bpage;
880
881         if (map == NULL)
882                 return;
883
884         while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
885                 STAILQ_REMOVE_HEAD(&map->bpages, links);
886                 free_bounce_page(dmat, bpage);
887         }
888 }
889
890 static void
891 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
892     bus_dmasync_op_t op)
893 {
894         struct bounce_page *bpage;
895         vm_offset_t datavaddr, tempvaddr;
896         bus_size_t datacount1, datacount2;
897
898         if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL)
899                 return;
900
901         /*
902          * Handle data bouncing.  We might also want to add support for
903          * invalidating the caches on broken hardware.
904          */
905         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
906             "performing bounce", __func__, dmat, dmat->common.flags, op);
907
908         if ((op & BUS_DMASYNC_PREWRITE) != 0) {
909                 while (bpage != NULL) {
910                         tempvaddr = 0;
911                         datavaddr = bpage->datavaddr;
912                         datacount1 = bpage->datacount;
913                         if (datavaddr == 0) {
914                                 tempvaddr =
915                                     pmap_quick_enter_page(bpage->datapage[0]);
916                                 datavaddr = tempvaddr | bpage->dataoffs;
917                                 datacount1 = min(PAGE_SIZE - bpage->dataoffs,
918                                     datacount1);
919                         }
920
921                         bcopy((void *)datavaddr,
922                             (void *)bpage->vaddr, datacount1);
923
924                         if (tempvaddr != 0)
925                                 pmap_quick_remove_page(tempvaddr);
926
927                         if (bpage->datapage[1] == 0) {
928                                 KASSERT(datacount1 == bpage->datacount,
929                 ("Mismatch between data size and provided memory space"));
930                                 goto next_w;
931                         }
932
933                         /*
934                          * We are dealing with an unmapped buffer that
935                          * spans two pages.
936                          */
937                         datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
938                         datacount2 = bpage->datacount - datacount1;
939                         bcopy((void *)datavaddr,
940                             (void *)(bpage->vaddr + datacount1), datacount2);
941                         pmap_quick_remove_page(datavaddr);
942
943 next_w:
944                         bpage = STAILQ_NEXT(bpage, links);
945                 }
946                 dmat->bounce_zone->total_bounced++;
947         }
948
949         if ((op & BUS_DMASYNC_POSTREAD) != 0) {
950                 while (bpage != NULL) {
951                         tempvaddr = 0;
952                         datavaddr = bpage->datavaddr;
953                         datacount1 = bpage->datacount;
954                         if (datavaddr == 0) {
955                                 tempvaddr =
956                                     pmap_quick_enter_page(bpage->datapage[0]);
957                                 datavaddr = tempvaddr | bpage->dataoffs;
958                                 datacount1 = min(PAGE_SIZE - bpage->dataoffs,
959                                     datacount1);
960                         }
961
962                         bcopy((void *)bpage->vaddr, (void *)datavaddr,
963                             datacount1);
964
965                         if (tempvaddr != 0)
966                                 pmap_quick_remove_page(tempvaddr);
967
968                         if (bpage->datapage[1] == 0) {
969                                 KASSERT(datacount1 == bpage->datacount,
970                 ("Mismatch between data size and provided memory space"));
971                                 goto next_r;
972                         }
973
974                         /*
975                          * We are dealing with an unmapped buffer that
976                          * spans two pages.
977                          */
978                         datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
979                         datacount2 = bpage->datacount - datacount1;
980                         bcopy((void *)(bpage->vaddr + datacount1),
981                             (void *)datavaddr, datacount2);
982                         pmap_quick_remove_page(datavaddr);
983
984 next_r:
985                         bpage = STAILQ_NEXT(bpage, links);
986                 }
987                 dmat->bounce_zone->total_bounced++;
988         }
989 }
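
/*
 * Illustrative driver-side use of the sync entry point above (hypothetical
 * names): the PRE operation is issued before the device touches the buffer
 * and the POST operation after it is done.  In this implementation only
 * BUS_DMASYNC_PREWRITE (copy client data into the bounce pages) and
 * BUS_DMASYNC_POSTREAD (copy it back out) do any copying.
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREWRITE);
 *	(start a device read of the buffer, wait for completion)
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTWRITE);
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREREAD);
 *	(start a device write into the buffer, wait for completion)
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTREAD);
 */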
990
991 static void
992 init_bounce_pages(void *dummy __unused)
993 {
994
995         total_bpages = 0;
996         STAILQ_INIT(&bounce_zone_list);
997         STAILQ_INIT(&bounce_map_waitinglist);
998         STAILQ_INIT(&bounce_map_callbacklist);
999         mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1000 }
1001 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1002
1003 static struct sysctl_ctx_list *
1004 busdma_sysctl_tree(struct bounce_zone *bz)
1005 {
1006
1007         return (&bz->sysctl_tree);
1008 }
1009
1010 static struct sysctl_oid *
1011 busdma_sysctl_tree_top(struct bounce_zone *bz)
1012 {
1013
1014         return (bz->sysctl_tree_top);
1015 }
1016
1017 static int
1018 alloc_bounce_zone(bus_dma_tag_t dmat)
1019 {
1020         struct bounce_zone *bz;
1021
1022         /* Check to see if we already have a suitable zone */
1023         STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1024                 if ((dmat->common.alignment <= bz->alignment) &&
1025                     (dmat->common.lowaddr >= bz->lowaddr)) {
1026                         dmat->bounce_zone = bz;
1027                         return (0);
1028                 }
1029         }
1030
1031         if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1032             M_NOWAIT | M_ZERO)) == NULL)
1033                 return (ENOMEM);
1034
1035         STAILQ_INIT(&bz->bounce_page_list);
1036         bz->free_bpages = 0;
1037         bz->reserved_bpages = 0;
1038         bz->active_bpages = 0;
1039         bz->lowaddr = dmat->common.lowaddr;
1040         bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
1041         bz->map_count = 0;
1042         snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1043         busdma_zonecount++;
1044         snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1045         STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1046         dmat->bounce_zone = bz;
1047
1048         sysctl_ctx_init(&bz->sysctl_tree);
1049         bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1050             SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1051             CTLFLAG_RD, 0, "");
1052         if (bz->sysctl_tree_top == NULL) {
1053                 sysctl_ctx_free(&bz->sysctl_tree);
1054                 return (0);     /* XXX error code? */
1055         }
1056
1057         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1058             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1059             "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1060             "Total bounce pages");
1061         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1062             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1063             "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1064             "Free bounce pages");
1065         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1066             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1067             "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1068             "Reserved bounce pages");
1069         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1070             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1071             "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1072             "Active bounce pages");
1073         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1074             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1075             "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1076             "Total bounce requests");
1077         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1078             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1079             "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1080             "Total bounce requests that were deferred");
1081         SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1082             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1083             "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1084         SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
1085             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1086             "alignment", CTLFLAG_RD, &bz->alignment, "");
1087
1088         return (0);
1089 }
1090
1091 static int
1092 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1093 {
1094         struct bounce_zone *bz;
1095         int count;
1096
1097         bz = dmat->bounce_zone;
1098         count = 0;
1099         while (numpages > 0) {
1100                 struct bounce_page *bpage;
1101
1102                 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1103                                                      M_NOWAIT | M_ZERO);
1104
1105                 if (bpage == NULL)
1106                         break;
1107                 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1108                                                          M_NOWAIT, 0ul,
1109                                                          bz->lowaddr,
1110                                                          PAGE_SIZE,
1111                                                          0);
1112                 if (bpage->vaddr == 0) {
1113                         free(bpage, M_DEVBUF);
1114                         break;
1115                 }
1116                 bpage->busaddr = pmap_kextract(bpage->vaddr);
1117                 mtx_lock(&bounce_lock);
1118                 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1119                 total_bpages++;
1120                 bz->total_bpages++;
1121                 bz->free_bpages++;
1122                 mtx_unlock(&bounce_lock);
1123                 count++;
1124                 numpages--;
1125         }
1126         return (count);
1127 }
1128
1129 static int
1130 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1131 {
1132         struct bounce_zone *bz;
1133         int pages;
1134
1135         mtx_assert(&bounce_lock, MA_OWNED);
1136         bz = dmat->bounce_zone;
1137         pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1138         if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1139                 return (map->pagesneeded - (map->pagesreserved + pages));
1140         bz->free_bpages -= pages;
1141         bz->reserved_bpages += pages;
1142         map->pagesreserved += pages;
1143         pages = map->pagesneeded - map->pagesreserved;
1144
1145         return (pages);
1146 }
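
/*
 * Worked example of the commit semantics above (illustrative numbers): with
 * free_bpages = 2, pagesneeded = 5 and pagesreserved = 0, a commit == 0 call
 * returns the shortfall of 3 without touching any counters, so the NOWAIT
 * path fails with ENOMEM.  A commit == 1 call instead takes the 2 free
 * pages, bumps pagesreserved to 2 and still returns 3, leaving the caller
 * to queue the map on the waiting list until free_bounce_page() can cover
 * the remainder.
 */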
1147
1148 static bus_addr_t
1149 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1150                 bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
1151 {
1152         struct bounce_zone *bz;
1153         struct bounce_page *bpage;
1154
1155         KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1156         KASSERT(map != NULL && map != &nobounce_dmamap,
1157             ("add_bounce_page: bad map %p", map));
1158
1159         bz = dmat->bounce_zone;
1160         if (map->pagesneeded == 0)
1161                 panic("add_bounce_page: map doesn't need any pages");
1162         map->pagesneeded--;
1163
1164         if (map->pagesreserved == 0)
1165                 panic("add_bounce_page: map doesn't need any pages");
1166         map->pagesreserved--;
1167
1168         mtx_lock(&bounce_lock);
1169         bpage = STAILQ_FIRST(&bz->bounce_page_list);
1170         if (bpage == NULL)
1171                 panic("add_bounce_page: free page list is empty");
1172
1173         STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1174         bz->reserved_bpages--;
1175         bz->active_bpages++;
1176         mtx_unlock(&bounce_lock);
1177
1178         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1179                 /* Page offset needs to be preserved. */
1180                 bpage->vaddr |= addr1 & PAGE_MASK;
1181                 bpage->busaddr |= addr1 & PAGE_MASK;
1182                 KASSERT(addr2 == 0,
1183         ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
1184         }
1185         bpage->datavaddr = vaddr;
1186         bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
1187         KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
1188         bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
1189         bpage->dataoffs = addr1 & PAGE_MASK;
1190         bpage->datacount = size;
1191         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1192         return (bpage->busaddr);
1193 }
1194
1195 static void
1196 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1197 {
1198         struct bus_dmamap *map;
1199         struct bounce_zone *bz;
1200
1201         bz = dmat->bounce_zone;
1202         bpage->datavaddr = 0;
1203         bpage->datacount = 0;
1204         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1205                 /*
1206                  * Reset the bounce page to start at offset 0.  Other uses
1207                  * of this bounce page may need to store a full page of
1208                  * data and/or assume it starts on a page boundary.
1209                  */
1210                 bpage->vaddr &= ~PAGE_MASK;
1211                 bpage->busaddr &= ~PAGE_MASK;
1212         }
1213
1214         mtx_lock(&bounce_lock);
1215         STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1216         bz->free_bpages++;
1217         bz->active_bpages--;
1218         if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1219                 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1220                         STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1221                         STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1222                             map, links);
1223                         busdma_swi_pending = 1;
1224                         bz->total_deferred++;
1225                         swi_sched(vm_ih, 0);
1226                 }
1227         }
1228         mtx_unlock(&bounce_lock);
1229 }
1230
1231 void
1232 busdma_swi(void)
1233 {
1234         bus_dma_tag_t dmat;
1235         struct bus_dmamap *map;
1236
1237         mtx_lock(&bounce_lock);
1238         while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1239                 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1240                 mtx_unlock(&bounce_lock);
1241                 dmat = map->dmat;
1242                 (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
1243                 bus_dmamap_load_mem(map->dmat, map, &map->mem,
1244                     map->callback, map->callback_arg, BUS_DMA_WAITOK);
1245                 (dmat->common.lockfunc)(dmat->common.lockfuncarg,
1246                     BUS_DMA_UNLOCK);
1247                 mtx_lock(&bounce_lock);
1248         }
1249         mtx_unlock(&bounce_lock);
1250 }
1251
1252 struct bus_dma_impl bus_dma_bounce_impl = {
1253         .tag_create = bounce_bus_dma_tag_create,
1254         .tag_destroy = bounce_bus_dma_tag_destroy,
1255         .map_create = bounce_bus_dmamap_create,
1256         .map_destroy = bounce_bus_dmamap_destroy,
1257         .mem_alloc = bounce_bus_dmamem_alloc,
1258         .mem_free = bounce_bus_dmamem_free,
1259         .load_phys = bounce_bus_dmamap_load_phys,
1260         .load_buffer = bounce_bus_dmamap_load_buffer,
1261         .load_ma = bounce_bus_dmamap_load_ma,
1262         .map_waitok = bounce_bus_dmamap_waitok,
1263         .map_complete = bounce_bus_dmamap_complete,
1264         .map_unload = bounce_bus_dmamap_unload,
1265         .map_sync = bounce_bus_dmamap_sync
1266 };