1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/bus.h>
36 #include <sys/interrupt.h>
37 #include <sys/kernel.h>
38 #include <sys/ktr.h>
39 #include <sys/lock.h>
40 #include <sys/proc.h>
41 #include <sys/memdesc.h>
42 #include <sys/mutex.h>
43 #include <sys/sysctl.h>
44 #include <sys/uio.h>
45
46 #include <vm/vm.h>
47 #include <vm/vm_extern.h>
48 #include <vm/vm_kern.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_map.h>
51
52 #include <machine/atomic.h>
53 #include <machine/bus.h>
54 #include <machine/md_var.h>
55 #include <machine/specialreg.h>
56 #include <x86/include/busdma_impl.h>
57
58 #ifdef __i386__
59 #define MAX_BPAGES 512
60 #else
61 #define MAX_BPAGES 8192
62 #endif
63
64 enum {
65         BUS_DMA_COULD_BOUNCE    = 0x01,
66         BUS_DMA_MIN_ALLOC_COMP  = 0x02,
67         BUS_DMA_KMEM_ALLOC      = 0x04,
68 };
69
70 struct bounce_zone;
71
72 struct bus_dma_tag {
73         struct bus_dma_tag_common common;
74         int                     map_count;
75         int                     bounce_flags;
76         bus_dma_segment_t       *segments;
77         struct bounce_zone      *bounce_zone;
78 };
79
80 struct bounce_page {
81         vm_offset_t     vaddr;          /* kva of bounce buffer */
82         bus_addr_t      busaddr;        /* Physical address */
83         vm_offset_t     datavaddr;      /* kva of client data */
84         vm_offset_t     dataoffs;       /* page offset of client data */
85         vm_page_t       datapage[2];    /* physical page(s) of client data */
86         bus_size_t      datacount;      /* client data count */
87         STAILQ_ENTRY(bounce_page) links;
88 };
89
90 int busdma_swi_pending;
91
92 struct bounce_zone {
93         STAILQ_ENTRY(bounce_zone) links;
94         STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
95         int             total_bpages;
96         int             free_bpages;
97         int             reserved_bpages;
98         int             active_bpages;
99         int             total_bounced;
100         int             total_deferred;
101         int             map_count;
102         int             domain;
103         bus_size_t      alignment;
104         bus_addr_t      lowaddr;
105         char            zoneid[8];
106         char            lowaddrid[20];
107         struct sysctl_ctx_list sysctl_tree;
108         struct sysctl_oid *sysctl_tree_top;
109 };
110
111 static struct mtx bounce_lock;
112 static int total_bpages;
113 static int busdma_zonecount;
114 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
115
116 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
117 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
118            "Total bounce pages");
119
120 struct bus_dmamap {
121         struct bp_list         bpages;
122         int                    pagesneeded;
123         int                    pagesreserved;
124         bus_dma_tag_t          dmat;
125         struct memdesc         mem;
126         bus_dmamap_callback_t *callback;
127         void                  *callback_arg;
128         STAILQ_ENTRY(bus_dmamap) links;
129 };
130
131 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
132 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
133 static struct bus_dmamap nobounce_dmamap;
134
135 static void init_bounce_pages(void *dummy);
136 static int alloc_bounce_zone(bus_dma_tag_t dmat);
137 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
138 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
139                                 int commit);
140 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
141                                   vm_offset_t vaddr, bus_addr_t addr1,
142                                   bus_addr_t addr2, bus_size_t size);
143 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
144 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
145 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
146                                     pmap_t pmap, void *buf, bus_size_t buflen,
147                                     int flags);
148 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
149                                    vm_paddr_t buf, bus_size_t buflen,
150                                    int flags);
151 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
152                                      int flags);
153
154 static int
155 bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
156 {
157         struct bounce_zone *bz;
158         int error;
159
160         /* Must bounce */
161         if ((error = alloc_bounce_zone(dmat)) != 0)
162                 return (error);
163         bz = dmat->bounce_zone;
164
165         if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
166                 int pages;
167
168                 pages = atop(dmat->common.maxsize) - bz->total_bpages;
169
170                 /* Add pages to our bounce pool */
171                 if (alloc_bounce_pages(dmat, pages) < pages)
172                         return (ENOMEM);
173         }
174         /* Performed initial allocation */
175         dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
176
177         return (0);
178 }
179
180 /*
181  * Allocate a device specific dma_tag.
182  */
183 static int
184 bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
185     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
186     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
187     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
188     void *lockfuncarg, bus_dma_tag_t *dmat)
189 {
190         bus_dma_tag_t newtag;
191         int error;
192
193         *dmat = NULL;
194         error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
195             NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
196             maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
197             sizeof (struct bus_dma_tag), (void **)&newtag);
198         if (error != 0)
199                 return (error);
200
201         newtag->common.impl = &bus_dma_bounce_impl;
202         newtag->map_count = 0;
203         newtag->segments = NULL;
204
205         if (parent != NULL && ((newtag->common.filter != NULL) ||
206             ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)))
207                 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
208
209         if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
210             newtag->common.alignment > 1)
211                 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
212
213         if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
214             (flags & BUS_DMA_ALLOCNOW) != 0)
215                 error = bounce_bus_dma_zone_setup(newtag);
216         else
217                 error = 0;
218         
219         if (error != 0)
220                 free(newtag, M_DEVBUF);
221         else
222                 *dmat = newtag;
223         CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
224             __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
225             error);
226         return (error);
227 }
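
/*
 * Editorial example (hypothetical driver names, not part of the upstream
 * file): a device limited to 32-bit DMA addresses typically reaches this
 * routine through bus_dma_tag_create(9); the argument order is parent,
 * alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize,
 * nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->dma_tag);
 *
 * On a machine with more than 4GB of RAM that lowaddr is below
 * ptoa(Maxmem), so the code above marks the new tag BUS_DMA_COULD_BOUNCE.
 */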
228
229 /*
230  * Update the domain for the tag.  We may need to reallocate the zone and
231  * bounce pages.
232  */ 
233 static int
234 bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
235 {
236
237         KASSERT(dmat->map_count == 0,
238             ("bounce_bus_dma_tag_set_domain:  Domain set after use.\n"));
239         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
240             dmat->bounce_zone == NULL)
241                 return (0);
242         dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
243         return (bounce_bus_dma_zone_setup(dmat));
244 }
245
246 static int
247 bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
248 {
249         bus_dma_tag_t dmat_copy, parent;
250         int error;
251
252         error = 0;
253         dmat_copy = dmat;
254
255         if (dmat != NULL) {
256                 if (dmat->map_count != 0) {
257                         error = EBUSY;
258                         goto out;
259                 }
260                 while (dmat != NULL) {
261                         parent = (bus_dma_tag_t)dmat->common.parent;
262                         atomic_subtract_int(&dmat->common.ref_count, 1);
263                         if (dmat->common.ref_count == 0) {
264                                 if (dmat->segments != NULL)
265                                         free_domain(dmat->segments, M_DEVBUF);
266                                 free(dmat, M_DEVBUF);
267                                 /*
268                                  * Last reference count, so
269                                  * release our reference
270                                  * count on our parent.
271                                  */
272                                 dmat = parent;
273                         } else
274                                 dmat = NULL;
275                 }
276         }
277 out:
278         CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
279         return (error);
280 }
281
282 /*
283  * Allocate a handle for mapping from kva/uva/physical
284  * address space into bus device space.
285  */
286 static int
287 bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
288 {
289         struct bounce_zone *bz;
290         int error, maxpages, pages;
291
292         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__);
293
294         error = 0;
295
296         if (dmat->segments == NULL) {
297                 dmat->segments = (bus_dma_segment_t *)malloc_domain(
298                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
299                     M_DEVBUF, dmat->common.domain, M_NOWAIT);
300                 if (dmat->segments == NULL) {
301                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
302                             __func__, dmat, ENOMEM);
303                         return (ENOMEM);
304                 }
305         }
306
307         /*
308          * Bouncing might be required if the driver asks for an active
309          * exclusion region, a data alignment that is stricter than 1, and/or
310          * an active address boundary.
311          */
312         if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) {
313                 /* Must bounce */
314                 if (dmat->bounce_zone == NULL) {
315                         if ((error = alloc_bounce_zone(dmat)) != 0)
316                                 return (error);
317                 }
318                 bz = dmat->bounce_zone;
319
320                 *mapp = (bus_dmamap_t)malloc_domain(sizeof(**mapp), M_DEVBUF,
321                     dmat->common.domain, M_NOWAIT | M_ZERO);
322                 if (*mapp == NULL) {
323                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
324                             __func__, dmat, ENOMEM);
325                         return (ENOMEM);
326                 }
327
328                 /* Initialize the new map */
329                 STAILQ_INIT(&((*mapp)->bpages));
330
331                 /*
332                  * Attempt to add pages to our pool on a per-instance
333                  * basis up to a sane limit.
334                  */
335                 if (dmat->common.alignment > 1)
336                         maxpages = MAX_BPAGES;
337                 else
338                         maxpages = MIN(MAX_BPAGES, Maxmem -
339                             atop(dmat->common.lowaddr));
340                 if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
341                     (bz->map_count > 0 && bz->total_bpages < maxpages)) {
342                         pages = MAX(atop(dmat->common.maxsize), 1);
343                         pages = MIN(maxpages - bz->total_bpages, pages);
344                         pages = MAX(pages, 1);
345                         if (alloc_bounce_pages(dmat, pages) < pages)
346                                 error = ENOMEM;
347                         if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
348                             == 0) {
349                                 if (error == 0) {
350                                         dmat->bounce_flags |=
351                                             BUS_DMA_MIN_ALLOC_COMP;
352                                 }
353                         } else
354                                 error = 0;
355                 }
356                 bz->map_count++;
357         } else {
358                 *mapp = NULL;
359         }
360         if (error == 0)
361                 dmat->map_count++;
362         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
363             __func__, dmat, dmat->common.flags, error);
364         return (error);
365 }
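
/*
 * Editorial example (hypothetical names): a driver obtains per-buffer maps
 * from the path above with
 *
 *	error = bus_dmamap_create(sc->data_tag, 0, &sc->data_map);
 *
 * For tags that cannot bounce this hands back a NULL map, and the load
 * routines below quietly substitute nobounce_dmamap for it.
 */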
366
367 /*
368  * Destroy a handle for mapping from kva/uva/physical
369  * address space into bus device space.
370  */
371 static int
372 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
373 {
374
375         if (map != NULL && map != &nobounce_dmamap) {
376                 if (STAILQ_FIRST(&map->bpages) != NULL) {
377                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
378                             __func__, dmat, EBUSY);
379                         return (EBUSY);
380                 }
381                 if (dmat->bounce_zone)
382                         dmat->bounce_zone->map_count--;
383                 free_domain(map, M_DEVBUF);
384         }
385         dmat->map_count--;
386         CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
387         return (0);
388 }
389
390
391 /*
392  * Allocate a piece of memory that can be efficiently mapped into
393  * bus device space based on the constraints listed in the dma tag.
394  * A dmamap for use with bus_dmamap_load is also allocated.
395  */
396 static int
397 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
398     bus_dmamap_t *mapp)
399 {
400         vm_memattr_t attr;
401         int mflags;
402
403         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__);
404
405         if (flags & BUS_DMA_NOWAIT)
406                 mflags = M_NOWAIT;
407         else
408                 mflags = M_WAITOK;
409
410         /* If we succeed, no mapping/bouncing will be required */
411         *mapp = NULL;
412
413         if (dmat->segments == NULL) {
414                 dmat->segments = (bus_dma_segment_t *)malloc_domain(
415                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
416                     M_DEVBUF, dmat->common.domain, mflags);
417                 if (dmat->segments == NULL) {
418                         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
419                             __func__, dmat, dmat->common.flags, ENOMEM);
420                         return (ENOMEM);
421                 }
422         }
423         if (flags & BUS_DMA_ZERO)
424                 mflags |= M_ZERO;
425         if (flags & BUS_DMA_NOCACHE)
426                 attr = VM_MEMATTR_UNCACHEABLE;
427         else
428                 attr = VM_MEMATTR_DEFAULT;
429
430         /*
431          * Allocate the buffer from the malloc(9) allocator if...
432          *  - It's small enough to fit into a single power of two sized bucket.
433  *  - The alignment is less than or equal to the maximum size.
434  *  - The low address requirement is fulfilled.
435  * else allocate non-contiguous pages if...
436  *  - The page count that could get allocated doesn't exceed
437  *    nsegments, even when the maximum segment size is less
438  *    than PAGE_SIZE.
439          *  - The alignment constraint isn't larger than a page boundary.
440          *  - There are no boundary-crossing constraints.
441          * else allocate a block of contiguous pages because one or more of the
442          * constraints is something that only the contig allocator can fulfill.
443          *
444  * NOTE: The (dmat->common.alignment <= dmat->common.maxsize) check
445          * below is just a quick hack. The exact alignment guarantees
446          * of malloc(9) need to be nailed down, and the code below
447          * should be rewritten to take that into account.
448          *
449          * In the meantime warn the user if malloc gets it wrong.
450          */
451         if ((dmat->common.maxsize <= PAGE_SIZE) &&
452            (dmat->common.alignment <= dmat->common.maxsize) &&
453             dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
454             attr == VM_MEMATTR_DEFAULT) {
455                 *vaddr = malloc_domain(dmat->common.maxsize, M_DEVBUF,
456                     dmat->common.domain, mflags);
457         } else if (dmat->common.nsegments >=
458             howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
459             dmat->common.alignment <= PAGE_SIZE &&
460             (dmat->common.boundary % PAGE_SIZE) == 0) {
461                 /* Page-based multi-segment allocations allowed */
462                 *vaddr = (void *)kmem_alloc_attr_domain(dmat->common.domain,
463                     dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
464                     attr);
465                 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
466         } else {
467                 *vaddr = (void *)kmem_alloc_contig_domain(dmat->common.domain,
468                     dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
469                     dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
470                     dmat->common.boundary, attr);
471                 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
472         }
473         if (*vaddr == NULL) {
474                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
475                     __func__, dmat, dmat->common.flags, ENOMEM);
476                 return (ENOMEM);
477         } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
478                 printf("bus_dmamem_alloc failed to align memory properly.\n");
479         }
480         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
481             __func__, dmat, dmat->common.flags, 0);
482         return (0);
483 }
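
/*
 * Editorial example (hypothetical names): for a 64KB descriptor-ring tag
 * with nsegments = 1, maxsegsz = 64KB, alignment = 4096 and lowaddr =
 * BUS_SPACE_MAXADDR_32BIT, the first two branches above fail (maxsize
 * exceeds PAGE_SIZE, and 16 pages will not fit in a single segment), so the
 * memory comes from kmem_alloc_contig_domain().  A driver then uses the
 * result roughly as:
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, &sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_tag, sc->ring_map, sc->ring,
 *		    65536, ring_load_cb, sc, BUS_DMA_NOWAIT);
 *
 * where ring_load_cb() is a driver callback that records the single
 * bus_dma_segment_t it is handed.
 */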
484
485 /*
486  * Free a piece of memory and its associated dmamap that were allocated
487  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
488  */
489 static void
490 bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
491 {
492         /*
493          * dmamem does not need to be bounced, so the map should be
494          * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
495          * was used and set if kmem_alloc_contig() was used.
496          */
497         if (map != NULL)
498                 panic("bus_dmamem_free: Invalid map freed\n");
499         if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
500                 free_domain(vaddr, M_DEVBUF);
501         else
502                 kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
503         CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
504             dmat->bounce_flags);
505 }
506
507 static void
508 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
509     bus_size_t buflen, int flags)
510 {
511         bus_addr_t curaddr;
512         bus_size_t sgsize;
513
514         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
515                 /*
516                  * Count the number of bounce pages
517                  * needed in order to complete this transfer
518                  */
519                 curaddr = buf;
520                 while (buflen != 0) {
521                         sgsize = MIN(buflen, dmat->common.maxsegsz);
522                         if (bus_dma_run_filter(&dmat->common, curaddr)) {
523                                 sgsize = MIN(sgsize,
524                                     PAGE_SIZE - (curaddr & PAGE_MASK));
525                                 map->pagesneeded++;
526                         }
527                         curaddr += sgsize;
528                         buflen -= sgsize;
529                 }
530                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
531         }
532 }
533
534 static void
535 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
536     void *buf, bus_size_t buflen, int flags)
537 {
538         vm_offset_t vaddr;
539         vm_offset_t vendaddr;
540         bus_addr_t paddr;
541         bus_size_t sg_len;
542
543         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
544                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
545                     "alignment= %d", dmat->common.lowaddr,
546                     ptoa((vm_paddr_t)Maxmem),
547                     dmat->common.boundary, dmat->common.alignment);
548                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
549                     map, &nobounce_dmamap, map->pagesneeded);
550                 /*
551                  * Count the number of bounce pages
552                  * needed in order to complete this transfer
553                  */
554                 vaddr = (vm_offset_t)buf;
555                 vendaddr = (vm_offset_t)buf + buflen;
556
557                 while (vaddr < vendaddr) {
558                         sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
559                         if (pmap == kernel_pmap)
560                                 paddr = pmap_kextract(vaddr);
561                         else
562                                 paddr = pmap_extract(pmap, vaddr);
563                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
564                                 sg_len = roundup2(sg_len,
565                                     dmat->common.alignment);
566                                 map->pagesneeded++;
567                         }
568                         vaddr += sg_len;
569                 }
570                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
571         }
572 }
573
574 static void
575 _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
576     int ma_offs, bus_size_t buflen, int flags)
577 {
578         bus_size_t sg_len, max_sgsize;
579         int page_index;
580         vm_paddr_t paddr;
581
582         if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
583                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
584                     "alignment= %d", dmat->common.lowaddr,
585                     ptoa((vm_paddr_t)Maxmem),
586                     dmat->common.boundary, dmat->common.alignment);
587                 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
588                     map, &nobounce_dmamap, map->pagesneeded);
589
590                 /*
591                  * Count the number of bounce pages
592                  * needed in order to complete this transfer
593                  */
594                 page_index = 0;
595                 while (buflen > 0) {
596                         paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
597                         sg_len = PAGE_SIZE - ma_offs;
598                         max_sgsize = MIN(buflen, dmat->common.maxsegsz);
599                         sg_len = MIN(sg_len, max_sgsize);
600                         if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
601                                 sg_len = roundup2(sg_len,
602                                     dmat->common.alignment);
603                                 sg_len = MIN(sg_len, max_sgsize);
604                                 KASSERT((sg_len & (dmat->common.alignment - 1))
605                                     == 0, ("Segment size is not aligned"));
606                                 map->pagesneeded++;
607                         }
608                         if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
609                                 page_index++;
610                         ma_offs = (ma_offs + sg_len) & PAGE_MASK;
611                         KASSERT(buflen >= sg_len,
612                             ("Segment length overruns original buffer"));
613                         buflen -= sg_len;
614                 }
615                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
616         }
617 }
618
619 static int
620 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
621 {
622
623         /* Reserve Necessary Bounce Pages */
624         mtx_lock(&bounce_lock);
625         if (flags & BUS_DMA_NOWAIT) {
626                 if (reserve_bounce_pages(dmat, map, 0) != 0) {
627                         mtx_unlock(&bounce_lock);
628                         return (ENOMEM);
629                 }
630         } else {
631                 if (reserve_bounce_pages(dmat, map, 1) != 0) {
632                         /* Queue us for resources */
633                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
634                         mtx_unlock(&bounce_lock);
635                         return (EINPROGRESS);
636                 }
637         }
638         mtx_unlock(&bounce_lock);
639
640         return (0);
641 }
642
643 /*
644  * Add a single contiguous physical range to the segment list.
645  */
646 static int
647 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
648     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
649 {
650         bus_addr_t baddr, bmask;
651         int seg;
652
653         /*
654          * Make sure we don't cross any boundaries.
655          */
656         bmask = ~(dmat->common.boundary - 1);
657         if (dmat->common.boundary > 0) {
658                 baddr = (curaddr + dmat->common.boundary) & bmask;
659                 if (sgsize > (baddr - curaddr))
660                         sgsize = (baddr - curaddr);
661         }
662
663         /*
664          * Insert chunk into a segment, coalescing with
665          * previous segment if possible.
666          */
667         seg = *segp;
668         if (seg == -1) {
669                 seg = 0;
670                 segs[seg].ds_addr = curaddr;
671                 segs[seg].ds_len = sgsize;
672         } else {
673                 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
674                     (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
675                     (dmat->common.boundary == 0 ||
676                      (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
677                         segs[seg].ds_len += sgsize;
678                 else {
679                         if (++seg >= dmat->common.nsegments)
680                                 return (0);
681                         segs[seg].ds_addr = curaddr;
682                         segs[seg].ds_len = sgsize;
683                 }
684         }
685         *segp = seg;
686         return (sgsize);
687 }
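
/*
 * Editorial worked example of the boundary clipping above: with
 * common.boundary = 0x10000 (64KB), a chunk at curaddr = 0x1fff0 with
 * sgsize = 0x100 gives bmask = ~0xffff and baddr = (0x1fff0 + 0x10000) &
 * bmask = 0x20000, so sgsize is clipped to baddr - curaddr = 0x10.  The
 * remaining 0xf0 bytes start a new segment on the far side of the boundary
 * on the caller's next iteration.
 */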
688
689 /*
690  * Utility function to load a physical buffer.  segp contains
691  * the starting segment on entrance, and the ending segment on exit.
692  */
693 static int
694 bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
695     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
696     int *segp)
697 {
698         bus_size_t sgsize;
699         bus_addr_t curaddr;
700         int error;
701
702         if (map == NULL)
703                 map = &nobounce_dmamap;
704
705         if (segs == NULL)
706                 segs = dmat->segments;
707
708         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
709                 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
710                 if (map->pagesneeded != 0) {
711                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
712                         if (error)
713                                 return (error);
714                 }
715         }
716
717         while (buflen > 0) {
718                 curaddr = buf;
719                 sgsize = MIN(buflen, dmat->common.maxsegsz);
720                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
721                     map->pagesneeded != 0 &&
722                     bus_dma_run_filter(&dmat->common, curaddr)) {
723                         sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
724                         curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
725                             sgsize);
726                 }
727                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
728                     segp);
729                 if (sgsize == 0)
730                         break;
731                 buf += sgsize;
732                 buflen -= sgsize;
733         }
734
735         /*
736          * Did we fit?
737          */
738         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
739 }
740
741 /*
742  * Utility function to load a linear buffer.  segp contains
743  * the starting segment on entrance, and the ending segment on exit.
744  */
745 static int
746 bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
747     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
748     int *segp)
749 {
750         bus_size_t sgsize, max_sgsize;
751         bus_addr_t curaddr;
752         vm_offset_t kvaddr, vaddr;
753         int error;
754
755         if (map == NULL)
756                 map = &nobounce_dmamap;
757
758         if (segs == NULL)
759                 segs = dmat->segments;
760
761         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
762                 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
763                 if (map->pagesneeded != 0) {
764                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
765                         if (error)
766                                 return (error);
767                 }
768         }
769
770         vaddr = (vm_offset_t)buf;
771         while (buflen > 0) {
772                 /*
773                  * Get the physical address for this segment.
774                  */
775                 if (pmap == kernel_pmap) {
776                         curaddr = pmap_kextract(vaddr);
777                         kvaddr = vaddr;
778                 } else {
779                         curaddr = pmap_extract(pmap, vaddr);
780                         kvaddr = 0;
781                 }
782
783                 /*
784                  * Compute the segment size, and adjust counts.
785                  */
786                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
787                 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
788                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
789                     map->pagesneeded != 0 &&
790                     bus_dma_run_filter(&dmat->common, curaddr)) {
791                         sgsize = roundup2(sgsize, dmat->common.alignment);
792                         sgsize = MIN(sgsize, max_sgsize);
793                         curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
794                             sgsize);
795                 } else {
796                         sgsize = MIN(sgsize, max_sgsize);
797                 }
798                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
799                     segp);
800                 if (sgsize == 0)
801                         break;
802                 vaddr += sgsize;
803                 buflen -= sgsize;
804         }
805
806         /*
807          * Did we fit?
808          */
809         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
810 }
811
812 static int
813 bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
814     struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
815     bus_dma_segment_t *segs, int *segp)
816 {
817         vm_paddr_t paddr, next_paddr;
818         int error, page_index;
819         bus_size_t sgsize, max_sgsize;
820
821         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
822                 /*
823                  * If we have to keep the offset of each page this function
824                  * is not suitable, switch back to bus_dmamap_load_ma_triv
825                  * which is going to do the right thing in this case.
826                  */
827                 error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
828                     flags, segs, segp);
829                 return (error);
830         }
831
832         if (map == NULL)
833                 map = &nobounce_dmamap;
834
835         if (segs == NULL)
836                 segs = dmat->segments;
837
838         if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
839                 _bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
840                 if (map->pagesneeded != 0) {
841                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
842                         if (error)
843                                 return (error);
844                 }
845         }
846
847         page_index = 0;
848         while (buflen > 0) {
849                 /*
850                  * Compute the segment size, and adjust counts.
851                  */
852                 paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
853                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
854                 sgsize = PAGE_SIZE - ma_offs;
855                 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
856                     map->pagesneeded != 0 &&
857                     bus_dma_run_filter(&dmat->common, paddr)) {
858                         sgsize = roundup2(sgsize, dmat->common.alignment);
859                         sgsize = MIN(sgsize, max_sgsize);
860                         KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
861                             ("Segment size is not aligned"));
862                         /*
863                          * Check if two pages of the user provided buffer
864                          * are used.
865                          */
866                         if ((ma_offs + sgsize) > PAGE_SIZE)
867                                 next_paddr =
868                                     VM_PAGE_TO_PHYS(ma[page_index + 1]);
869                         else
870                                 next_paddr = 0;
871                         paddr = add_bounce_page(dmat, map, 0, paddr,
872                             next_paddr, sgsize);
873                 } else {
874                         sgsize = MIN(sgsize, max_sgsize);
875                 }
876                 sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
877                     segp);
878                 if (sgsize == 0)
879                         break;
880                 KASSERT(buflen >= sgsize,
881                     ("Segment length overruns original buffer"));
882                 buflen -= sgsize;
883                 if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
884                         page_index++;
885                 ma_offs = (ma_offs + sgsize) & PAGE_MASK;
886         }
887
888         /*
889          * Did we fit?
890          */
891         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
892 }
893
894 static void
895 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
896     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
897 {
898
899         if (map == NULL)
900                 return;
901         map->mem = *mem;
902         map->dmat = dmat;
903         map->callback = callback;
904         map->callback_arg = callback_arg;
905 }
906
907 static bus_dma_segment_t *
908 bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
909     bus_dma_segment_t *segs, int nsegs, int error)
910 {
911
912         if (segs == NULL)
913                 segs = dmat->segments;
914         return (segs);
915 }
916
917 /*
918  * Release the mapping held by map.
919  */
920 static void
921 bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
922 {
923         struct bounce_page *bpage;
924
925         if (map == NULL)
926                 return;
927
928         while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
929                 STAILQ_REMOVE_HEAD(&map->bpages, links);
930                 free_bounce_page(dmat, bpage);
931         }
932 }
933
934 static void
935 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
936     bus_dmasync_op_t op)
937 {
938         struct bounce_page *bpage;
939         vm_offset_t datavaddr, tempvaddr;
940         bus_size_t datacount1, datacount2;
941
942         if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL)
943                 return;
944
945         /*
946          * Handle data bouncing.  We might also want to add support for
947          * invalidating the caches on broken hardware.
948          */
949         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
950             "performing bounce", __func__, dmat, dmat->common.flags, op);
951
952         if ((op & BUS_DMASYNC_PREWRITE) != 0) {
953                 while (bpage != NULL) {
954                         tempvaddr = 0;
955                         datavaddr = bpage->datavaddr;
956                         datacount1 = bpage->datacount;
957                         if (datavaddr == 0) {
958                                 tempvaddr =
959                                     pmap_quick_enter_page(bpage->datapage[0]);
960                                 datavaddr = tempvaddr | bpage->dataoffs;
961                                 datacount1 = min(PAGE_SIZE - bpage->dataoffs,
962                                     datacount1);
963                         }
964
965                         bcopy((void *)datavaddr,
966                             (void *)bpage->vaddr, datacount1);
967
968                         if (tempvaddr != 0)
969                                 pmap_quick_remove_page(tempvaddr);
970
971                         if (bpage->datapage[1] == 0) {
972                                 KASSERT(datacount1 == bpage->datacount,
973                 ("Mismatch between data size and provided memory space"));
974                                 goto next_w;
975                         }
976
977                         /*
978                          * We are dealing with an unmapped buffer that spans
979                          * two pages.
980                          */
981                         datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
982                         datacount2 = bpage->datacount - datacount1;
983                         bcopy((void *)datavaddr,
984                             (void *)(bpage->vaddr + datacount1), datacount2);
985                         pmap_quick_remove_page(datavaddr);
986
987 next_w:
988                         bpage = STAILQ_NEXT(bpage, links);
989                 }
990                 dmat->bounce_zone->total_bounced++;
991         }
992
993         if ((op & BUS_DMASYNC_POSTREAD) != 0) {
994                 while (bpage != NULL) {
995                         tempvaddr = 0;
996                         datavaddr = bpage->datavaddr;
997                         datacount1 = bpage->datacount;
998                         if (datavaddr == 0) {
999                                 tempvaddr =
1000                                     pmap_quick_enter_page(bpage->datapage[0]);
1001                                 datavaddr = tempvaddr | bpage->dataoffs;
1002                                 datacount1 = min(PAGE_SIZE - bpage->dataoffs,
1003                                     datacount1);
1004                         }
1005
1006                         bcopy((void *)bpage->vaddr, (void *)datavaddr,
1007                             datacount1);
1008
1009                         if (tempvaddr != 0)
1010                                 pmap_quick_remove_page(tempvaddr);
1011
1012                         if (bpage->datapage[1] == 0) {
1013                                 KASSERT(datacount1 == bpage->datacount,
1014                 ("Mismatch between data size and provided memory space"));
1015                                 goto next_r;
1016                         }
1017
1018                         /*
1019                          * We are dealing with an unmapped buffer that spans
1020                          * two pages.
1021                          */
1022                         datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
1023                         datacount2 = bpage->datacount - datacount1;
1024                         bcopy((void *)(bpage->vaddr + datacount1),
1025                             (void *)datavaddr, datacount2);
1026                         pmap_quick_remove_page(datavaddr);
1027
1028 next_r:
1029                         bpage = STAILQ_NEXT(bpage, links);
1030                 }
1031                 dmat->bounce_zone->total_bounced++;
1032         }
1033 }
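
/*
 * Editorial example of the ordering drivers must follow for the bouncing
 * above to work (hypothetical names):
 *
 *	bus_dmamap_sync(sc->tag, sc->map,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	<start the transfer and wait for it to complete>
 *	bus_dmamap_sync(sc->tag, sc->map,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->tag, sc->map);
 *
 * In this implementation only PREWRITE (copy client data into the bounce
 * pages) and POSTREAD (copy it back out) move data; the other operations
 * are accepted but are no-ops here.
 */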
1034
1035 static void
1036 init_bounce_pages(void *dummy __unused)
1037 {
1038
1039         total_bpages = 0;
1040         STAILQ_INIT(&bounce_zone_list);
1041         STAILQ_INIT(&bounce_map_waitinglist);
1042         STAILQ_INIT(&bounce_map_callbacklist);
1043         mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1044 }
1045 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1046
1047 static struct sysctl_ctx_list *
1048 busdma_sysctl_tree(struct bounce_zone *bz)
1049 {
1050
1051         return (&bz->sysctl_tree);
1052 }
1053
1054 static struct sysctl_oid *
1055 busdma_sysctl_tree_top(struct bounce_zone *bz)
1056 {
1057
1058         return (bz->sysctl_tree_top);
1059 }
1060
1061 static int
1062 alloc_bounce_zone(bus_dma_tag_t dmat)
1063 {
1064         struct bounce_zone *bz;
1065
1066         /* Check to see if we already have a suitable zone */
1067         STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1068                 if ((dmat->common.alignment <= bz->alignment) &&
1069                     (dmat->common.lowaddr >= bz->lowaddr) &&
1070                     (dmat->common.domain == bz->domain)) {
1071                         dmat->bounce_zone = bz;
1072                         return (0);
1073                 }
1074         }
1075
1076         if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1077             M_NOWAIT | M_ZERO)) == NULL)
1078                 return (ENOMEM);
1079
1080         STAILQ_INIT(&bz->bounce_page_list);
1081         bz->free_bpages = 0;
1082         bz->reserved_bpages = 0;
1083         bz->active_bpages = 0;
1084         bz->lowaddr = dmat->common.lowaddr;
1085         bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
1086         bz->map_count = 0;
1087         bz->domain = dmat->common.domain;
1088         snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1089         busdma_zonecount++;
1090         snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1091         STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1092         dmat->bounce_zone = bz;
1093
1094         sysctl_ctx_init(&bz->sysctl_tree);
1095         bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1096             SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1097             CTLFLAG_RD, 0, "");
1098         if (bz->sysctl_tree_top == NULL) {
1099                 sysctl_ctx_free(&bz->sysctl_tree);
1100                 return (0);     /* XXX error code? */
1101         }
1102
1103         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1104             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1105             "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1106             "Total bounce pages");
1107         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1108             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1109             "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1110             "Free bounce pages");
1111         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1112             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1113             "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1114             "Reserved bounce pages");
1115         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1116             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1117             "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1118             "Active bounce pages");
1119         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1120             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1121             "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1122             "Total bounce requests");
1123         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1124             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1125             "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1126             "Total bounce requests that were deferred");
1127         SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1128             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1129             "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1130         SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
1131             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1132             "alignment", CTLFLAG_RD, &bz->alignment, "");
1133         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1134             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1135             "domain", CTLFLAG_RD, &bz->domain, 0,
1136             "memory domain");
1137
1138         return (0);
1139 }
1140
1141 static int
1142 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1143 {
1144         struct bounce_zone *bz;
1145         int count;
1146
1147         bz = dmat->bounce_zone;
1148         count = 0;
1149         while (numpages > 0) {
1150                 struct bounce_page *bpage;
1151
1152                 bpage = (struct bounce_page *)malloc_domain(sizeof(*bpage),
1153                     M_DEVBUF, dmat->common.domain, M_NOWAIT | M_ZERO);
1154
1155                 if (bpage == NULL)
1156                         break;
1157                 bpage->vaddr = (vm_offset_t)contigmalloc_domain(PAGE_SIZE,
1158                     M_DEVBUF, dmat->common.domain, M_NOWAIT, 0ul,
1159                     bz->lowaddr, PAGE_SIZE, 0);
1160                 if (bpage->vaddr == 0) {
1161                         free_domain(bpage, M_DEVBUF);
1162                         break;
1163                 }
1164                 bpage->busaddr = pmap_kextract(bpage->vaddr);
1165                 mtx_lock(&bounce_lock);
1166                 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1167                 total_bpages++;
1168                 bz->total_bpages++;
1169                 bz->free_bpages++;
1170                 mtx_unlock(&bounce_lock);
1171                 count++;
1172                 numpages--;
1173         }
1174         return (count);
1175 }
1176
1177 static int
1178 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1179 {
1180         struct bounce_zone *bz;
1181         int pages;
1182
1183         mtx_assert(&bounce_lock, MA_OWNED);
1184         bz = dmat->bounce_zone;
1185         pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1186         if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1187                 return (map->pagesneeded - (map->pagesreserved + pages));
1188         bz->free_bpages -= pages;
1189         bz->reserved_bpages += pages;
1190         map->pagesreserved += pages;
1191         pages = map->pagesneeded - map->pagesreserved;
1192
1193         return (pages);
1194 }
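
/*
 * Editorial worked example: with map->pagesneeded = 4, map->pagesreserved = 0
 * and bz->free_bpages = 2, pages becomes MIN(2, 4) = 2.  A non-committing
 * call (commit == 0) returns the shortfall 4 - (0 + 2) = 2 without touching
 * any counters, so the BUS_DMA_NOWAIT path in _bus_dmamap_reserve_pages()
 * fails with ENOMEM.  A committing call takes the two free pages
 * (free_bpages 2 -> 0, reserved_bpages += 2, pagesreserved = 2) and returns
 * the 2 pages still outstanding, so the map is queued on
 * bounce_map_waitinglist and completed later from free_bounce_page().
 */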
1195
1196 static bus_addr_t
1197 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1198                 bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
1199 {
1200         struct bounce_zone *bz;
1201         struct bounce_page *bpage;
1202
1203         KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1204         KASSERT(map != NULL && map != &nobounce_dmamap,
1205             ("add_bounce_page: bad map %p", map));
1206
1207         bz = dmat->bounce_zone;
1208         if (map->pagesneeded == 0)
1209                 panic("add_bounce_page: map doesn't need any pages");
1210         map->pagesneeded--;
1211
1212         if (map->pagesreserved == 0)
1213                 panic("add_bounce_page: map doesn't have any reserved pages");
1214         map->pagesreserved--;
1215
1216         mtx_lock(&bounce_lock);
1217         bpage = STAILQ_FIRST(&bz->bounce_page_list);
1218         if (bpage == NULL)
1219                 panic("add_bounce_page: free page list is empty");
1220
1221         STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1222         bz->reserved_bpages--;
1223         bz->active_bpages++;
1224         mtx_unlock(&bounce_lock);
1225
1226         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1227                 /* Page offset needs to be preserved. */
1228                 bpage->vaddr |= addr1 & PAGE_MASK;
1229                 bpage->busaddr |= addr1 & PAGE_MASK;
1230                 KASSERT(addr2 == 0,
1231         ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
1232         }
1233         bpage->datavaddr = vaddr;
1234         bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
1235         KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
1236         bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
1237         bpage->dataoffs = addr1 & PAGE_MASK;
1238         bpage->datacount = size;
1239         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1240         return (bpage->busaddr);
1241 }
1242
1243 static void
1244 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1245 {
1246         struct bus_dmamap *map;
1247         struct bounce_zone *bz;
1248
1249         bz = dmat->bounce_zone;
1250         bpage->datavaddr = 0;
1251         bpage->datacount = 0;
1252         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1253                 /*
1254                  * Reset the bounce page to start at offset 0.  Other uses
1255                  * of this bounce page may need to store a full page of
1256                  * data and/or assume it starts on a page boundary.
1257                  */
1258                 bpage->vaddr &= ~PAGE_MASK;
1259                 bpage->busaddr &= ~PAGE_MASK;
1260         }
1261
1262         mtx_lock(&bounce_lock);
1263         STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1264         bz->free_bpages++;
1265         bz->active_bpages--;
1266         if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1267                 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1268                         STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1269                         STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1270                             map, links);
1271                         busdma_swi_pending = 1;
1272                         bz->total_deferred++;
1273                         swi_sched(vm_ih, 0);
1274                 }
1275         }
1276         mtx_unlock(&bounce_lock);
1277 }
1278
1279 void
1280 busdma_swi(void)
1281 {
1282         bus_dma_tag_t dmat;
1283         struct bus_dmamap *map;
1284
1285         mtx_lock(&bounce_lock);
1286         while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1287                 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1288                 mtx_unlock(&bounce_lock);
1289                 dmat = map->dmat;
1290                 (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
1291                 bus_dmamap_load_mem(map->dmat, map, &map->mem,
1292                     map->callback, map->callback_arg, BUS_DMA_WAITOK);
1293                 (dmat->common.lockfunc)(dmat->common.lockfuncarg,
1294                     BUS_DMA_UNLOCK);
1295                 mtx_lock(&bounce_lock);
1296         }
1297         mtx_unlock(&bounce_lock);
1298 }
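
/*
 * Editorial note: the deferred path above is why a tag whose loads may be
 * deferred needs a usable lockfunc/lockfuncarg pair, typically the stock
 * helper with a hypothetical driver mutex:
 *
 *	... bus_dma_tag_create(..., busdma_lock_mutex, &sc->sc_mtx, &sc->tag);
 *
 * busdma_swi() then re-runs the load callback with &sc->sc_mtx held, just
 * as if bus_dmamap_load() had completed synchronously.
 */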
1299
1300 struct bus_dma_impl bus_dma_bounce_impl = {
1301         .tag_create = bounce_bus_dma_tag_create,
1302         .tag_destroy = bounce_bus_dma_tag_destroy,
1303         .tag_set_domain = bounce_bus_dma_tag_set_domain,
1304         .map_create = bounce_bus_dmamap_create,
1305         .map_destroy = bounce_bus_dmamap_destroy,
1306         .mem_alloc = bounce_bus_dmamem_alloc,
1307         .mem_free = bounce_bus_dmamem_free,
1308         .load_phys = bounce_bus_dmamap_load_phys,
1309         .load_buffer = bounce_bus_dmamap_load_buffer,
1310         .load_ma = bounce_bus_dmamap_load_ma,
1311         .map_waitok = bounce_bus_dmamap_waitok,
1312         .map_complete = bounce_bus_dmamap_complete,
1313         .map_unload = bounce_bus_dmamap_unload,
1314         .map_sync = bounce_bus_dmamap_sync,
1315 };