/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>

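/*
 * Hard cap on the number of bounce pages a single bounce zone may grow
 * to (enforced in bounce_bus_dmamap_create()).  On i386 the larger
 * limit applies only when the machine has memory above 4GB, which is
 * when bouncing becomes common.
 */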
#ifdef __i386__
#define MAX_BPAGES (Maxmem > atop(0x100000000ULL) ? 8192 : 512)
#else
#define MAX_BPAGES 8192
#endif

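/*
 * Per-tag state kept in bounce_flags:
 * BUS_DMA_COULD_BOUNCE   - the tag's constraints may force bouncing.
 * BUS_DMA_MIN_ALLOC_COMP - the initial bounce page allocation is done.
 * BUS_DMA_KMEM_ALLOC     - dmamem was allocated with kmem_alloc_*(),
 *                          not malloc(9).
 */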
enum {
        BUS_DMA_COULD_BOUNCE    = 0x01,
        BUS_DMA_MIN_ALLOC_COMP  = 0x02,
        BUS_DMA_KMEM_ALLOC      = 0x04,
};

struct bounce_zone;

struct bus_dma_tag {
        struct bus_dma_tag_common common;
        int                     map_count;
        int                     bounce_flags;
        bus_dma_segment_t       *segments;
        struct bounce_zone      *bounce_zone;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        vm_offset_t     dataoffs;       /* page offset of client data */
        vm_page_t       datapage[2];    /* physical page(s) of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             map_count;
        int             domain;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
           "Total bounce pages");

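/*
 * A dmamap owns the list of bounce pages backing one mapping.  The
 * mem/callback/callback_arg fields record enough state to replay a
 * deferred load from busdma_swi() once bounce pages become available.
 */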
struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        struct memdesc         mem;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);

static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;
        int error;

        /* Must bounce */
        if ((error = alloc_bounce_zone(dmat)) != 0)
                return (error);
        bz = dmat->bounce_zone;

        if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
                int pages;

                pages = atop(dmat->common.maxsize) - bz->total_bpages;

                /* Add pages to our bounce pool */
                if (alloc_bounce_pages(dmat, pages) < pages)
                        return (ENOMEM);
        }
        /* Performed initial allocation */
        dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;

        return (0);
}

/*
 * Allocate a device-specific dma_tag.
 */
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error;

        *dmat = NULL;
        error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
            NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
            maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
            sizeof (struct bus_dma_tag), (void **)&newtag);
        if (error != 0)
                return (error);

        newtag->common.impl = &bus_dma_bounce_impl;
        newtag->map_count = 0;
        newtag->segments = NULL;

        if (parent != NULL && (newtag->common.filter != NULL ||
            (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))
                newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

        if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
            newtag->common.alignment > 1)
                newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

        if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
            (flags & BUS_DMA_ALLOCNOW) != 0)
                error = bounce_bus_dma_zone_setup(newtag);
        else
                error = 0;

        if (error != 0) {
                /* Drop the tag before tracing so we never read freed memory. */
                free(newtag, M_DEVBUF);
                newtag = NULL;
        } else
                *dmat = newtag;
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
            error);
        return (error);
}

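/*
 * Illustrative only (not part of the original file): a driver would
 * typically reach the function above through bus_dma_tag_create(9).
 * "dev", "sc" and RING_SIZE below are hypothetical driver names.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    PAGE_SIZE, 0,		alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	lowaddr: bounce anything above 4GB
 *	    BUS_SPACE_MAXADDR,		highaddr
 *	    NULL, NULL,			no filter
 *	    RING_SIZE, 1, RING_SIZE,	maxsize, nsegments, maxsegsz
 *	    0,				flags
 *	    busdma_lock_mutex, &sc->mtx,
 *	    &sc->ring_tag);
 *
 * A tag whose lowaddr is below the top of RAM, or whose alignment is
 * greater than 1, is marked BUS_DMA_COULD_BOUNCE above.
 */
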
/*
 * Update the domain for the tag.  We may need to reallocate the zone and
 * bounce pages.
 */
static int
bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{

        KASSERT(dmat->map_count == 0,
            ("bounce_bus_dma_tag_set_domain: Domain set after use."));
        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
            dmat->bounce_zone == NULL)
                return (0);
        dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
        return (bounce_bus_dma_zone_setup(dmat));
}

static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy, parent;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {
                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }
                while (dmat != NULL) {
                        parent = (bus_dma_tag_t)dmat->common.parent;
                        atomic_subtract_int(&dmat->common.ref_count, 1);
                        if (dmat->common.ref_count == 0) {
                                if (dmat->segments != NULL)
                                        free_domain(dmat->segments, M_DEVBUF);
                                free(dmat, M_DEVBUF);
                                /*
                                 * We dropped the last reference;
                                 * release our reference on the
                                 * parent and walk up the chain.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        struct bounce_zone *bz;
        int error, maxpages, pages;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__);

        error = 0;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc_domainset(
                    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
                    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                /* Must bounce */
                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0)
                                return (error);
                }
                bz = dmat->bounce_zone;

                *mapp = (bus_dmamap_t)malloc_domainset(sizeof(**mapp), M_DEVBUF,
                    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
                if (*mapp == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->common.alignment > 1)
                        maxpages = MAX_BPAGES;
                else
                        maxpages = MIN(MAX_BPAGES, Maxmem -
                            atop(dmat->common.lowaddr));
                if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
                    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        pages = MAX(atop(dmat->common.maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;
                        if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
                            == 0) {
                                if (error == 0) {
                                        dmat->bounce_flags |=
                                            BUS_DMA_MIN_ALLOC_COMP;
                                }
                        } else
                                error = 0;
                }
                bz->map_count++;
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->common.flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        if (map != NULL && map != &nobounce_dmamap) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, EBUSY);
                        return (EBUSY);
                }
                if (dmat->bounce_zone)
                        dmat->bounce_zone->map_count--;
                free_domain(map, M_DEVBUF);
        }
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
        vm_memattr_t attr;
        int mflags;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__);

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc_domainset(
                    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
                    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
                if (dmat->segments == NULL) {
                        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                            __func__, dmat, dmat->common.flags, ENOMEM);
                        return (ENOMEM);
                }
        }
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;
        if (flags & BUS_DMA_NOCACHE)
                attr = VM_MEMATTR_UNCACHEABLE;
        else
                attr = VM_MEMATTR_DEFAULT;
        /*
         * Allocate the buffer from the malloc(9) allocator if...
         *  - It's small enough to fit into a single power-of-two sized bucket.
         *  - The alignment is less than or equal to the maximum size.
         *  - The low address requirement is fulfilled.
         * else allocate non-contiguous pages if...
         *  - The page count that could get allocated doesn't exceed
         *    nsegments, even when the maximum segment size is less
         *    than PAGE_SIZE.
         *  - The alignment constraint isn't larger than a page boundary.
         *  - There are no boundary-crossing constraints.
         * else allocate a block of contiguous pages because one or more of the
         * constraints is something that only the contig allocator can fulfill.
         *
         * NOTE: The (dmat->common.alignment <= dmat->common.maxsize) check
         * below is just a quick hack. The exact alignment guarantees
         * of malloc(9) need to be nailed down, and the code below
         * should be rewritten to take that into account.
         *
         * In the meantime warn the user if malloc gets it wrong.
         */
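        /*
         * For instance (illustrative values only): maxsize = 2048,
         * alignment = 4 and lowaddr = BUS_SPACE_MAXADDR takes the
         * malloc(9) path on a machine whose RAM is all addressable;
         * maxsize = 64K, nsegments = 16, maxsegsz = 4K, alignment =
         * PAGE_SIZE and boundary = 0 takes the kmem_alloc_attr path;
         * the same tag with nsegments = 1 falls through to the
         * contiguous allocator.
         */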
        if (dmat->common.maxsize <= PAGE_SIZE &&
            dmat->common.alignment <= dmat->common.maxsize &&
            dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
            attr == VM_MEMATTR_DEFAULT) {
                *vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF,
                    DOMAINSET_PREF(dmat->common.domain), mflags);
        } else if (dmat->common.nsegments >=
            howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
            PAGE_SIZE)) &&
            dmat->common.alignment <= PAGE_SIZE &&
            (dmat->common.boundary % PAGE_SIZE) == 0) {
                /* Page-based multi-segment allocations allowed */
                *vaddr = (void *)kmem_alloc_attr_domainset(
                    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
                    mflags, 0ul, dmat->common.lowaddr, attr);
                dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
        } else {
                *vaddr = (void *)kmem_alloc_contig_domainset(
                    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
                    mflags, 0ul, dmat->common.lowaddr,
                    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
                    dmat->common.boundary, attr);
                dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->common.flags, ENOMEM);
                return (ENOMEM);
        } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");
        }
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->common.flags, 0);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
         * was used and set if kmem_alloc_contig() was used.
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
                free_domain(vaddr, M_DEVBUF);
        else
                kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
            dmat->bounce_flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
        vm_paddr_t curaddr;
        bus_size_t sgsize;

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                curaddr = buf;
                while (buflen != 0) {
                        sgsize = MIN(buflen, dmat->common.maxsegsz);
                        if (bus_dma_run_filter(&dmat->common, curaddr)) {
                                sgsize = MIN(sgsize,
                                    PAGE_SIZE - (curaddr & PAGE_MASK));
                                map->pagesneeded++;
                        }
                        curaddr += sgsize;
                        buflen -= sgsize;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        vm_paddr_t paddr;
        bus_size_t sg_len;

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->common.lowaddr,
                    ptoa((vm_paddr_t)Maxmem),
                    dmat->common.boundary, dmat->common.alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
                        if (pmap == kernel_pmap)
                                paddr = pmap_kextract(vaddr);
                        else
                                paddr = pmap_extract(pmap, vaddr);
                        if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
                                sg_len = roundup2(sg_len,
                                    dmat->common.alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static void
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
    int ma_offs, bus_size_t buflen, int flags)
{
        bus_size_t sg_len, max_sgsize;
        int page_index;
        vm_paddr_t paddr;

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->common.lowaddr,
                    ptoa((vm_paddr_t)Maxmem),
                    dmat->common.boundary, dmat->common.alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                page_index = 0;
                while (buflen > 0) {
                        paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
                        sg_len = PAGE_SIZE - ma_offs;
                        max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                        sg_len = MIN(sg_len, max_sgsize);
                        if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
                                sg_len = roundup2(sg_len,
                                    dmat->common.alignment);
                                sg_len = MIN(sg_len, max_sgsize);
                                KASSERT((sg_len & (dmat->common.alignment - 1))
                                    == 0, ("Segment size is not aligned"));
                                map->pagesneeded++;
                        }
                        if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
                                page_index++;
                        ma_offs = (ma_offs + sg_len) & PAGE_MASK;
                        KASSERT(buflen >= sg_len,
                            ("Segment length overruns original buffer"));
                        buflen -= sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

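/*
 * Reserve bounce pages for a mapping, or, when the caller can sleep,
 * queue the map on the waiting list to be retried from
 * free_bounce_page().  In the latter case the caller gets EINPROGRESS
 * and the load finishes later via the deferred callback run from
 * busdma_swi().
 */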
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

        /* Reserve necessary bounce pages. */
        mtx_lock(&bounce_lock);
        if (flags & BUS_DMA_NOWAIT) {
                if (reserve_bounce_pages(dmat, map, 0) != 0) {
                        mtx_unlock(&bounce_lock);
                        return (ENOMEM);
                }
        } else {
                if (reserve_bounce_pages(dmat, map, 1) != 0) {
                        /* Queue us for resources */
                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        mtx_unlock(&bounce_lock);
                        return (EINPROGRESS);
                }
        }
        mtx_unlock(&bounce_lock);

        return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        bus_addr_t baddr, bmask;
        int seg;

        KASSERT(curaddr <= BUS_SPACE_MAXADDR,
            ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
            "hi %#jx",
            (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
            dmat, dmat->bounce_flags, (uintmax_t)dmat->common.lowaddr,
            (uintmax_t)dmat->common.highaddr));

        /*
         * Make sure we don't cross any boundaries.
         */
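        /*
         * For example (illustrative): with boundary = 0x1000, curaddr =
         * 0x1ff0 and sgsize = 0x100, baddr becomes 0x2000 and sgsize is
         * clamped to 0x10 so the segment ends exactly at the boundary.
         */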
        bmask = ~(dmat->common.boundary - 1);
        if (dmat->common.boundary > 0) {
                baddr = (curaddr + dmat->common.boundary) & bmask;
                if (sgsize > (baddr - curaddr))
                        sgsize = (baddr - curaddr);
        }

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
                    (dmat->common.boundary == 0 ||
                     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->common.nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
        bus_size_t sgsize;
        vm_paddr_t curaddr;
        int error;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        while (buflen > 0) {
                curaddr = buf;
                sgsize = MIN(buflen, dmat->common.maxsegsz);
                if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
                    map->pagesneeded != 0 &&
                    bus_dma_run_filter(&dmat->common, curaddr)) {
                        sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
                        curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
                            sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                buf += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
        bus_size_t sgsize, max_sgsize;
        vm_paddr_t curaddr;
        vm_offset_t kvaddr, vaddr;
        int error;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        vaddr = (vm_offset_t)buf;
        while (buflen > 0) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap == kernel_pmap) {
                        curaddr = pmap_kextract(vaddr);
                        kvaddr = vaddr;
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        kvaddr = 0;
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
                if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
                    map->pagesneeded != 0 &&
                    bus_dma_run_filter(&dmat->common, curaddr)) {
                        sgsize = roundup2(sgsize, dmat->common.alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
                            sgsize);
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static int
bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
        vm_paddr_t paddr, next_paddr;
        int error, page_index;
        bus_size_t sgsize, max_sgsize;

        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * If we have to keep the offset of each page, this
                 * function is not suitable; fall back to
                 * bus_dmamap_load_ma_triv(), which does the right
                 * thing in this case.
                 */
                error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
                    flags, segs, segp);
                return (error);
        }

        if (map == NULL)
                map = &nobounce_dmamap;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        page_index = 0;
        while (buflen > 0) {
                /*
                 * Compute the segment size, and adjust counts.
                 */
                paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
                max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                sgsize = PAGE_SIZE - ma_offs;
                if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
                    map->pagesneeded != 0 &&
                    bus_dma_run_filter(&dmat->common, paddr)) {
                        sgsize = roundup2(sgsize, dmat->common.alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
                            ("Segment size is not aligned"));
                        /*
                         * Check if two pages of the user provided buffer
                         * are used.
                         */
                        if ((ma_offs + sgsize) > PAGE_SIZE)
                                next_paddr =
                                    VM_PAGE_TO_PHYS(ma[page_index + 1]);
                        else
                                next_paddr = 0;
                        paddr = add_bounce_page(dmat, map, 0, paddr,
                            next_paddr, sgsize);
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                KASSERT(buflen >= sgsize,
                    ("Segment length overruns original buffer"));
                buflen -= sgsize;
                if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
                        page_index++;
                ma_offs = (ma_offs + sgsize) & PAGE_MASK;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

        if (map == NULL)
                return;
        map->mem = *mem;
        map->dmat = dmat;
        map->callback = callback;
        map->callback_arg = callback_arg;
}

static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

        if (segs == NULL)
                segs = dmat->segments;
        return (segs);
}

/*
 * Release the mapping held by map.
 */
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        if (map == NULL)
                return;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
        struct bounce_page *bpage;
        vm_offset_t datavaddr, tempvaddr;
        bus_size_t datacount1, datacount2;

        if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL)
                return;

        /*
         * Handle data bouncing.  We might also want to add support for
         * invalidating the caches on broken hardware.
         */
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
            "performing bounce", __func__, dmat, dmat->common.flags, op);

        if ((op & BUS_DMASYNC_PREWRITE) != 0) {
                while (bpage != NULL) {
                        tempvaddr = 0;
                        datavaddr = bpage->datavaddr;
                        datacount1 = bpage->datacount;
                        if (datavaddr == 0) {
                                tempvaddr =
                                    pmap_quick_enter_page(bpage->datapage[0]);
                                datavaddr = tempvaddr | bpage->dataoffs;
                                datacount1 = min(PAGE_SIZE - bpage->dataoffs,
                                    datacount1);
                        }

                        bcopy((void *)datavaddr,
                            (void *)bpage->vaddr, datacount1);

                        if (tempvaddr != 0)
                                pmap_quick_remove_page(tempvaddr);

                        if (bpage->datapage[1] == 0) {
                                KASSERT(datacount1 == bpage->datacount,
                ("Mismatch between data size and provided memory space"));
                                goto next_w;
                        }

                        /*
                         * We are dealing with an unmapped buffer that spans
                         * two pages.
                         */
                        datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
                        datacount2 = bpage->datacount - datacount1;
                        bcopy((void *)datavaddr,
                            (void *)(bpage->vaddr + datacount1), datacount2);
                        pmap_quick_remove_page(datavaddr);

next_w:
                        bpage = STAILQ_NEXT(bpage, links);
                }
                dmat->bounce_zone->total_bounced++;
        }

        if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                while (bpage != NULL) {
                        tempvaddr = 0;
                        datavaddr = bpage->datavaddr;
                        datacount1 = bpage->datacount;
                        if (datavaddr == 0) {
                                tempvaddr =
                                    pmap_quick_enter_page(bpage->datapage[0]);
                                datavaddr = tempvaddr | bpage->dataoffs;
                                datacount1 = min(PAGE_SIZE - bpage->dataoffs,
                                    datacount1);
                        }

                        bcopy((void *)bpage->vaddr, (void *)datavaddr,
                            datacount1);

                        if (tempvaddr != 0)
                                pmap_quick_remove_page(tempvaddr);

                        if (bpage->datapage[1] == 0) {
                                KASSERT(datacount1 == bpage->datacount,
                ("Mismatch between data size and provided memory space"));
                                goto next_r;
                        }

                        /*
                         * We are dealing with an unmapped buffer that spans
                         * two pages.
                         */
                        datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
                        datacount2 = bpage->datacount - datacount1;
                        bcopy((void *)(bpage->vaddr + datacount1),
                            (void *)datavaddr, datacount2);
                        pmap_quick_remove_page(datavaddr);

next_r:
                        bpage = STAILQ_NEXT(bpage, links);
                }
                dmat->bounce_zone->total_bounced++;
        }
}

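/*
 * Illustrative usage (not from this file): a driver brackets each DMA
 * transaction with sync calls so the bounce copies above happen at the
 * right time.  "sc" is a hypothetical softc.
 *
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_PREREAD |
 *	    BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer and wait for completion ...
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_POSTREAD |
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->tag, sc->map);
 */
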
static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

        return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if (dmat->common.alignment <= bz->alignment &&
                    dmat->common.lowaddr >= bz->lowaddr &&
                    dmat->common.domain == bz->domain) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->common.lowaddr;
        bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
        bz->map_count = 0;
        bz->domain = dmat->common.domain;
        snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
            (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "domain", CTLFLAG_RD, &bz->domain, 0,
            "memory domain");

        return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = malloc_domainset(sizeof(*bpage), M_DEVBUF,
                    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
                    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT,
                    0ul, bz->lowaddr, PAGE_SIZE, 0);
                if (bpage->vaddr == 0) {
                        free_domain(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

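/*
 * Reserve as many of the pages the map still needs as the zone can
 * supply.  Returns the remaining shortfall (zero when the map is fully
 * reserved); with commit == 0 nothing is reserved unless the request
 * can be satisfied in full.
 */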
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
                bpage->vaddr |= addr1 & PAGE_MASK;
                bpage->busaddr |= addr1 & PAGE_MASK;
                KASSERT(addr2 == 0,
        ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
        }
        bpage->datavaddr = vaddr;
        bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
        KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
        bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
        bpage->dataoffs = addr1 & PAGE_MASK;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;
        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * Reset the bounce page to start at offset 0.  Other uses
                 * of this bounce page may need to store a full page of
                 * data and/or assume it starts on a page boundary.
                 */
                bpage->vaddr &= ~PAGE_MASK;
                bpage->busaddr &= ~PAGE_MASK;
        }

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                            map, links);
                        busdma_swi_pending = 1;
                        bz->total_deferred++;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
                bus_dmamap_load_mem(map->dmat, map, &map->mem,
                    map->callback, map->callback_arg, BUS_DMA_WAITOK);
                (dmat->common.lockfunc)(dmat->common.lockfuncarg,
                    BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}

struct bus_dma_impl bus_dma_bounce_impl = {
        .tag_create = bounce_bus_dma_tag_create,
        .tag_destroy = bounce_bus_dma_tag_destroy,
        .tag_set_domain = bounce_bus_dma_tag_set_domain,
        .map_create = bounce_bus_dmamap_create,
        .map_destroy = bounce_bus_dmamap_destroy,
        .mem_alloc = bounce_bus_dmamem_alloc,
        .mem_free = bounce_bus_dmamem_free,
        .load_phys = bounce_bus_dmamap_load_phys,
        .load_buffer = bounce_bus_dmamap_load_buffer,
        .load_ma = bounce_bus_dmamap_load_ma,
        .map_waitok = bounce_bus_dmamap_waitok,
        .map_complete = bounce_bus_dmamap_complete,
        .map_unload = bounce_bus_dmamap_unload,
        .map_sync = bounce_bus_dmamap_sync,
};