/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship of the FreeBSD Foundation.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/bus_dma_impl.h>

#define MAX_BPAGES 4096

enum {
        BF_COULD_BOUNCE         = 0x01,
        BF_MIN_ALLOC_COMP       = 0x02,
        BF_KMEM_ALLOC           = 0x04,
        BF_COHERENT             = 0x10,
};

struct bounce_zone;

struct bus_dma_tag {
        struct bus_dma_tag_common common;
        int                     map_count;
        int                     bounce_flags;
        bus_dma_segment_t       *segments;
        struct bounce_zone      *bounce_zone;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        vm_page_t       datapage;       /* physical page of client data */
        vm_offset_t     dataoffs;       /* page offset of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             map_count;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static void *busdma_ih;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
           "Total bounce pages");

struct sync_list {
        vm_offset_t     vaddr;          /* kva of client data */
        bus_addr_t      paddr;          /* physical address */
        vm_page_t       pages;          /* starting page of client data */
        bus_size_t      datacount;      /* client data count */
};

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        struct memdesc         mem;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
        u_int                   flags;
#define DMAMAP_COULD_BOUNCE     (1 << 0)
#define DMAMAP_FROM_DMAMEM      (1 << 1)
        int                     sync_count;
        struct sync_list        slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);

/*
 * Allocate a device specific dma_tag.
 */
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error;

        *dmat = NULL;
        error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
            NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
            maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
            sizeof (struct bus_dma_tag), (void **)&newtag);
        if (error != 0)
                return (error);

        newtag->common.impl = &bus_dma_bounce_impl;
        newtag->map_count = 0;
        newtag->segments = NULL;

        if ((flags & BUS_DMA_COHERENT) != 0)
                newtag->bounce_flags |= BF_COHERENT;

        if (parent != NULL) {
                if ((newtag->common.filter != NULL ||
                    (parent->bounce_flags & BF_COULD_BOUNCE) != 0))
                        newtag->bounce_flags |= BF_COULD_BOUNCE;

                /* Copy some flags from the parent */
                newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
        }

        if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
            newtag->common.alignment > 1)
                newtag->bounce_flags |= BF_COULD_BOUNCE;

        if (((newtag->bounce_flags & BF_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */
                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(round_page(maxsize)) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->bounce_flags |= BF_MIN_ALLOC_COMP;
        } else
                error = 0;

        if (error != 0)
                free(newtag, M_DEVBUF);
        else
                *dmat = newtag;
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
            error);
        return (error);
}
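
/*
 * Example (editorial sketch, not part of the original file): a typical
 * driver-side bus_dma_tag_create(9) call that is serviced by
 * bounce_bus_dma_tag_create() above.  The softc layout and function name
 * are hypothetical; the KPI and constants are the standard busdma ones.
 * A lowaddr below the top of RAM (here BUS_SPACE_MAXADDR_32BIT) or an
 * alignment greater than 1 is what sets BF_COULD_BOUNCE on the new tag.
 */
#if 0	/* illustrative only */
struct mydev_softc {
        device_t        dev;
        bus_dma_tag_t   buf_dmat;
};

static int
mydev_dma_init(struct mydev_softc *sc)
{
        return (bus_dma_tag_create(
            bus_get_dma_tag(sc->dev),   /* parent */
            4,                          /* alignment */
            0,                          /* no boundary restriction */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr: 32-bit DMA engine */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES,                   /* maxsize */
            1,                          /* nsegments */
            MCLBYTES,                   /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &sc->buf_dmat));
}
#endif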

static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy, parent;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {
                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }
                while (dmat != NULL) {
                        parent = (bus_dma_tag_t)dmat->common.parent;
                        atomic_subtract_int(&dmat->common.ref_count, 1);
                        if (dmat->common.ref_count == 0) {
                                if (dmat->segments != NULL)
                                        free(dmat->segments, M_DEVBUF);
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
        return (error);
}

static bus_dmamap_t
alloc_dmamap(bus_dma_tag_t dmat, int flags)
{
        u_long mapsize;
        bus_dmamap_t map;

        mapsize = sizeof(*map);
        mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
        map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
        if (map == NULL)
                return (NULL);

        /* Initialize the new map */
        STAILQ_INIT(&map->bpages);

        return (map);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        struct bounce_zone *bz;
        int error, maxpages, pages;

        error = 0;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
                    M_DEVBUF, M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }
        }

        *mapp = alloc_dmamap(dmat, M_NOWAIT);
        if (*mapp == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, ENOMEM);
                return (ENOMEM);
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->bounce_flags & BF_COULD_BOUNCE) {
                /* Must bounce */
                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0) {
                                free(*mapp, M_DEVBUF);
                                return (error);
                        }
                }
                bz = dmat->bounce_zone;

                (*mapp)->flags = DMAMAP_COULD_BOUNCE;

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->common.alignment > 1)
                        maxpages = MAX_BPAGES;
                else
                        maxpages = MIN(MAX_BPAGES, Maxmem -
                            atop(dmat->common.lowaddr));
                if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 ||
                    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        pages = MAX(atop(dmat->common.maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;
                        if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP)
                            == 0) {
                                if (error == 0) {
                                        dmat->bounce_flags |=
                                            BF_MIN_ALLOC_COMP;
                                }
                        } else
                                error = 0;
                }
                bz->map_count++;
        }
        if (error == 0)
                dmat->map_count++;
        else
                free(*mapp, M_DEVBUF);
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->common.flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        /* Check we are destroying the correct map type */
        if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
                panic("bounce_bus_dmamap_destroy: Invalid map freed\n");

        if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
                return (EBUSY);
        }
        if (dmat->bounce_zone) {
                KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
                    ("%s: Bounce zone when cannot bounce", __func__));
                dmat->bounce_zone->map_count--;
        }
        free(map, M_DEVBUF);
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
        /*
         * XXX ARM64TODO:
         * This bus_dma implementation requires an IO-Coherent architecture.
         * If IO-Coherency is not guaranteed, the BUS_DMA_COHERENT flag has
         * to be implemented using non-cacheable memory.
         */

        vm_memattr_t attr;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
                    M_DEVBUF, mflags);
                if (dmat->segments == NULL) {
                        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                            __func__, dmat, dmat->common.flags, ENOMEM);
                        return (ENOMEM);
                }
        }
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;
        if (flags & BUS_DMA_NOCACHE)
                attr = VM_MEMATTR_UNCACHEABLE;
        else if ((flags & BUS_DMA_COHERENT) != 0 &&
            (dmat->bounce_flags & BF_COHERENT) == 0)
                /*
                 * If we have a non-coherent tag, and are trying to allocate
                 * a coherent block of memory it needs to be uncached.
                 */
                attr = VM_MEMATTR_UNCACHEABLE;
        else
                attr = VM_MEMATTR_DEFAULT;

        /*
         * Create the map, but don't set the could bounce flag as
         * this allocation should never bounce.
         */
        *mapp = alloc_dmamap(dmat, mflags);
        if (*mapp == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->common.flags, ENOMEM);
                return (ENOMEM);
        }
        (*mapp)->flags = DMAMAP_FROM_DMAMEM;

        /*
         * Allocate the buffer from the malloc(9) allocator if...
         *  - It's small enough to fit into a single power of two sized bucket.
         *  - The alignment is less than or equal to the maximum size.
         *  - The low address requirement is fulfilled.
         * else allocate non-contiguous pages if...
         *  - The page count that could get allocated doesn't exceed
         *    nsegments, even when the maximum segment size is less
         *    than PAGE_SIZE.
         *  - The alignment constraint isn't larger than a page boundary.
         *  - There are no boundary-crossing constraints.
         * else allocate a block of contiguous pages because one or more of the
         * constraints is something that only the contig allocator can fulfill.
         *
         * NOTE: The (dmat->common.alignment <= dmat->maxsize) check
         * below is just a quick hack. The exact alignment guarantees
         * of malloc(9) need to be nailed down, and the code below
         * should be rewritten to take that into account.
         *
         * In the meantime warn the user if malloc gets it wrong.
         */
        if ((dmat->common.maxsize <= PAGE_SIZE) &&
           (dmat->common.alignment <= dmat->common.maxsize) &&
            dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
            attr == VM_MEMATTR_DEFAULT) {
                *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
        } else if (dmat->common.nsegments >=
            howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
            dmat->common.alignment <= PAGE_SIZE &&
            (dmat->common.boundary % PAGE_SIZE) == 0) {
                /* Page-based multi-segment allocations allowed */
                *vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags,
                    0ul, dmat->common.lowaddr, attr);
                dmat->bounce_flags |= BF_KMEM_ALLOC;
        } else {
                *vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
                    0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
                    dmat->common.alignment : 1ul, dmat->common.boundary, attr);
                dmat->bounce_flags |= BF_KMEM_ALLOC;
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->common.flags, ENOMEM);
                free(*mapp, M_DEVBUF);
                return (ENOMEM);
        } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");
        }
        dmat->map_count++;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->common.flags, 0);
        return (0);
}
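
/*
 * Example (editorial sketch): driver-side use of bus_dmamem_alloc(9),
 * which lands in bounce_bus_dmamem_alloc() above and picks malloc(9),
 * kmem_alloc_attr() or kmem_alloc_contig() based on the tag constraints.
 * The ring_* softc fields and the function name are hypothetical.
 */
#if 0	/* illustrative only */
static int
mydev_alloc_ring(struct mydev_softc *sc)
{
        int error;

        error = bus_dmamem_alloc(sc->ring_dmat, &sc->ring_va,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ring_map);
        if (error != 0)
                return (error);
        /* ... bus_dmamap_load() the ring; on teardown: ... */
        /* bus_dmamem_free(sc->ring_dmat, sc->ring_va, sc->ring_map); */
        return (0);
}
#endif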

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

        /*
         * Check the map came from bounce_bus_dmamem_alloc, so the map
         * should be NULL and the BF_KMEM_ALLOC flag cleared if malloc()
         * was used and set if kmem_alloc_contig() was used.
         */
        if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
                free(vaddr, M_DEVBUF);
        else
                kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
        free(map, M_DEVBUF);
        dmat->map_count--;
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
            dmat->bounce_flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
        bus_addr_t curaddr;
        bus_size_t sgsize;

        if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                curaddr = buf;
                while (buflen != 0) {
                        sgsize = MIN(buflen, dmat->common.maxsegsz);
                        if (bus_dma_run_filter(&dmat->common, curaddr)) {
                                sgsize = MIN(sgsize,
                                    PAGE_SIZE - (curaddr & PAGE_MASK));
                                map->pagesneeded++;
                        }
                        curaddr += sgsize;
                        buflen -= sgsize;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;
        bus_size_t sg_len;

        if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->common.lowaddr,
                    ptoa((vm_paddr_t)Maxmem),
                    dmat->common.boundary, dmat->common.alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
                    map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
                        if (pmap == kernel_pmap)
                                paddr = pmap_kextract(vaddr);
                        else
                                paddr = pmap_extract(pmap, vaddr);
                        if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
                                sg_len = roundup2(sg_len,
                                    dmat->common.alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

        /* Reserve Necessary Bounce Pages */
        mtx_lock(&bounce_lock);
        if (flags & BUS_DMA_NOWAIT) {
                if (reserve_bounce_pages(dmat, map, 0) != 0) {
                        mtx_unlock(&bounce_lock);
                        return (ENOMEM);
                }
        } else {
                if (reserve_bounce_pages(dmat, map, 1) != 0) {
                        /* Queue us for resources */
                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        mtx_unlock(&bounce_lock);
                        return (EINPROGRESS);
                }
        }
        mtx_unlock(&bounce_lock);

        return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
                sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->common.boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->common.nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
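
/*
 * Worked example for the boundary clamp above (editorial note, values
 * assumed): with boundary = 0x1000, curaddr = 0x2ff0 and sgsize = 0x40,
 * the range [0x2ff0, 0x3030) would cross the boundary at 0x3000, so
 * sgsize is trimmed to roundup2(0x2ff0, 0x1000) - 0x2ff0 = 0x10.  The
 * remaining 0x30 bytes are carried over by the caller's loop and start
 * a new segment at 0x3000.
 */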

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
        struct sync_list *sl;
        bus_size_t sgsize;
        bus_addr_t curaddr, sl_end;
        int error;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        sl = map->slist + map->sync_count - 1;
        sl_end = 0;

        while (buflen > 0) {
                curaddr = buf;
                sgsize = MIN(buflen, dmat->common.maxsegsz);
                if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
                    map->pagesneeded != 0 &&
                    bus_dma_run_filter(&dmat->common, curaddr)) {
                        sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
                        curaddr = add_bounce_page(dmat, map, 0, curaddr,
                            sgsize);
                } else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
                        if (map->sync_count > 0)
                                sl_end = sl->paddr + sl->datacount;

                        if (map->sync_count == 0 || curaddr != sl_end) {
                                if (++map->sync_count > dmat->common.nsegments)
                                        break;
                                sl++;
                                sl->vaddr = 0;
                                sl->paddr = curaddr;
                                sl->datacount = sgsize;
                                sl->pages = PHYS_TO_VM_PAGE(curaddr);
                                KASSERT(sl->pages != NULL,
                                    ("%s: page at PA:0x%08lx is not in "
                                    "vm_page_array", __func__, curaddr));
                        } else
                                sl->datacount += sgsize;
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                buf += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
        struct sync_list *sl;
        bus_size_t sgsize, max_sgsize;
        bus_addr_t curaddr, sl_pend;
        vm_offset_t kvaddr, vaddr, sl_vend;
        int error;

        if (segs == NULL)
                segs = dmat->segments;

        if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        sl = map->slist + map->sync_count - 1;
        vaddr = (vm_offset_t)buf;
        sl_pend = 0;
        sl_vend = 0;

        while (buflen > 0) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap == kernel_pmap) {
                        curaddr = pmap_kextract(vaddr);
                        kvaddr = vaddr;
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        kvaddr = 0;
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                max_sgsize = MIN(buflen, dmat->common.maxsegsz);
                sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
                if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
                    map->pagesneeded != 0 &&
                    bus_dma_run_filter(&dmat->common, curaddr)) {
                        sgsize = roundup2(sgsize, dmat->common.alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
                            sgsize);
                } else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
                        sgsize = MIN(sgsize, max_sgsize);
                        if (map->sync_count > 0) {
                                sl_pend = sl->paddr + sl->datacount;
                                sl_vend = sl->vaddr + sl->datacount;
                        }

                        if (map->sync_count == 0 ||
                            (kvaddr != 0 && kvaddr != sl_vend) ||
                            (curaddr != sl_pend)) {
                                if (++map->sync_count > dmat->common.nsegments)
                                        goto cleanup;
                                sl++;
                                sl->vaddr = kvaddr;
                                sl->paddr = curaddr;
                                if (kvaddr != 0) {
                                        sl->pages = NULL;
                                } else {
                                        sl->pages = PHYS_TO_VM_PAGE(curaddr);
                                        KASSERT(sl->pages != NULL,
                                            ("%s: page at PA:0x%08lx is not "
                                            "in vm_page_array", __func__,
                                            curaddr));
                                }
                                sl->datacount = sgsize;
                        } else
                                sl->datacount += sgsize;
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }
                sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
                    segp);
                if (sgsize == 0)
                        break;
                vaddr += sgsize;
                buflen -= sgsize;
        }

cleanup:
        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

        if ((map->flags & DMAMAP_COULD_BOUNCE) == 0)
                return;
        map->mem = *mem;
        map->dmat = dmat;
        map->callback = callback;
        map->callback_arg = callback_arg;
}

static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

        if (segs == NULL)
                segs = dmat->segments;
        return (segs);
}

/*
 * Release the mapping held by map.
 */
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }

        map->sync_count = 0;
}

static void
dma_preread_safe(vm_offset_t va, vm_size_t size)
{
        /*
         * Write back any partial cachelines immediately before and
         * after the DMA region.
         */
        if (va & (dcache_line_size - 1))
                cpu_dcache_wb_range(va, 1);
        if ((va + size) & (dcache_line_size - 1))
                cpu_dcache_wb_range(va + size, 1);

        cpu_dcache_inv_range(va, size);
}

static void
dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
{
        uint32_t len, offset;
        vm_page_t m;
        vm_paddr_t pa;
        vm_offset_t va, tempva;
        bus_size_t size;

        offset = sl->paddr & PAGE_MASK;
        m = sl->pages;
        size = sl->datacount;
        pa = sl->paddr;

        for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
                tempva = 0;
                if (sl->vaddr == 0) {
                        len = min(PAGE_SIZE - offset, size);
                        tempva = pmap_quick_enter_page(m);
                        va = tempva | offset;
                        KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
                            ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
                            VM_PAGE_TO_PHYS(m) | offset, pa));
                } else {
                        len = sl->datacount;
                        va = sl->vaddr;
                }

                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
                        cpu_dcache_wb_range(va, len);
                        break;
                case BUS_DMASYNC_PREREAD:
                        /*
                         * An mbuf may start in the middle of a cacheline. There
                         * will be no cpu writes to the beginning of that line
                         * (which contains the mbuf header) while dma is in
                         * progress.  Handle that case by doing a writeback of
                         * just the first cacheline before invalidating the
                         * overall buffer.  Any mbuf in a chain may have this
                         * misalignment.  Buffers which are not mbufs bounce if
                         * they are not aligned to a cacheline.
                         */
                        dma_preread_safe(va, len);
                        break;
                case BUS_DMASYNC_POSTREAD:
                case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
                        cpu_dcache_inv_range(va, len);
                        break;
                default:
                        panic("unsupported combination of sync operations: "
                              "0x%08x\n", op);
                }

                if (tempva != 0)
                        pmap_quick_remove_page(tempva);
        }
}

static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
        struct bounce_page *bpage;
        struct sync_list *sl, *end;
        vm_offset_t datavaddr, tempvaddr;

        if (op == BUS_DMASYNC_POSTWRITE)
                return;

        if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                /*
                 * Wait for any DMA operations to complete before the bcopy.
                 */
                fence();
        }

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
                    "performing bounce", __func__, dmat, dmat->common.flags,
                    op);

                if ((op & BUS_DMASYNC_PREWRITE) != 0) {
                        while (bpage != NULL) {
                                tempvaddr = 0;
                                datavaddr = bpage->datavaddr;
                                if (datavaddr == 0) {
                                        tempvaddr = pmap_quick_enter_page(
                                            bpage->datapage);
                                        datavaddr = tempvaddr | bpage->dataoffs;
                                }

                                bcopy((void *)datavaddr,
                                    (void *)bpage->vaddr, bpage->datacount);
                                if (tempvaddr != 0)
                                        pmap_quick_remove_page(tempvaddr);
                                if ((dmat->bounce_flags & BF_COHERENT) == 0)
                                        cpu_dcache_wb_range(bpage->vaddr,
                                            bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
                        while (bpage != NULL) {
                                if ((dmat->bounce_flags & BF_COHERENT) == 0)
                                        cpu_dcache_wbinv_range(bpage->vaddr,
                                            bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                        while (bpage != NULL) {
                                if ((dmat->bounce_flags & BF_COHERENT) == 0)
                                        cpu_dcache_inv_range(bpage->vaddr,
                                            bpage->datacount);
                                tempvaddr = 0;
                                datavaddr = bpage->datavaddr;
                                if (datavaddr == 0) {
                                        tempvaddr = pmap_quick_enter_page(
                                            bpage->datapage);
                                        datavaddr = tempvaddr | bpage->dataoffs;
                                }

                                bcopy((void *)bpage->vaddr,
                                    (void *)datavaddr, bpage->datacount);

                                if (tempvaddr != 0)
                                        pmap_quick_remove_page(tempvaddr);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }
        }

        /*
         * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
         */
        if (map->sync_count != 0) {
                sl = &map->slist[0];
                end = &map->slist[map->sync_count];
                CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
                    "performing sync", __func__, dmat, op);

                for ( ; sl != end; ++sl)
                        dma_dcache_sync(sl, op);
        }

        if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
                /*
                 * Wait for the bcopy to complete before any DMA operations.
                 */
                fence();
        }
}
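
/*
 * Example (editorial sketch): the canonical driver-side sync discipline
 * serviced by bounce_bus_dmamap_sync() above.  PRE* ops run before the
 * device is started, POST* ops after it has finished; the softc names
 * are hypothetical and the statements are fragments of a larger driver.
 */
#if 0	/* illustrative only */
        /* CPU -> device: write back caches / copy into bounce pages. */
        bus_dmamap_sync(sc->buf_dmat, sc->tx_map, BUS_DMASYNC_PREWRITE);
        /* ... start the transfer, wait for completion ... */
        bus_dmamap_sync(sc->buf_dmat, sc->tx_map, BUS_DMASYNC_POSTWRITE);

        /* Device -> CPU: invalidate before, copy out of bounce pages after. */
        bus_dmamap_sync(sc->buf_dmat, sc->rx_map, BUS_DMASYNC_PREREAD);
        /* ... start the transfer, wait for completion ... */
        bus_dmamap_sync(sc->buf_dmat, sc->rx_map, BUS_DMASYNC_POSTREAD);
#endif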

static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

        return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat->common.alignment <= bz->alignment) &&
                    (dmat->common.lowaddr >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->common.lowaddr;
        bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
        bz->map_count = 0;
        snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, "");

        return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_addr_t addr, bus_size_t size)
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
            ("add_bounce_page: bad map %p", map));

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
                bpage->vaddr |= addr & PAGE_MASK;
                bpage->busaddr |= addr & PAGE_MASK;
        }
        bpage->datavaddr = vaddr;
        bpage->datapage = PHYS_TO_VM_PAGE(addr);
        bpage->dataoffs = addr & PAGE_MASK;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;
        bool schedule_swi;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;
        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * Reset the bounce page to start at offset 0.  Other uses
                 * of this bounce page may need to store a full page of
                 * data and/or assume it starts on a page boundary.
                 */
                bpage->vaddr &= ~PAGE_MASK;
                bpage->busaddr &= ~PAGE_MASK;
        }

        schedule_swi = false;
        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                            map, links);
                        bz->total_deferred++;
                        schedule_swi = true;
                }
        }
        mtx_unlock(&bounce_lock);
        if (schedule_swi)
                swi_sched(busdma_ih, 0);
}

static void
busdma_swi(void *dummy __unused)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
                bus_dmamap_load_mem(map->dmat, map, &map->mem,
                    map->callback, map->callback_arg, BUS_DMA_WAITOK);
                (dmat->common.lockfunc)(dmat->common.lockfuncarg,
                    BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}

static void
start_busdma_swi(void *dummy __unused)
{
        if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE,
            &busdma_ih))
                panic("died while creating busdma swi ithread");
}
SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi,
    NULL);

struct bus_dma_impl bus_dma_bounce_impl = {
        .tag_create = bounce_bus_dma_tag_create,
        .tag_destroy = bounce_bus_dma_tag_destroy,
        .map_create = bounce_bus_dmamap_create,
        .map_destroy = bounce_bus_dmamap_destroy,
        .mem_alloc = bounce_bus_dmamem_alloc,
        .mem_free = bounce_bus_dmamem_free,
        .load_phys = bounce_bus_dmamap_load_phys,
        .load_buffer = bounce_bus_dmamap_load_buffer,
        .load_ma = bus_dmamap_load_ma_triv,
        .map_waitok = bounce_bus_dmamap_waitok,
        .map_complete = bounce_bus_dmamap_complete,
        .map_unload = bounce_bus_dmamap_unload,
        .map_sync = bounce_bus_dmamap_sync
};
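
/*
 * Example (editorial sketch): a deferrable bus_dmamap_load(9) against this
 * implementation.  When bounce pages are exhausted and BUS_DMA_NOWAIT is
 * not given, _bus_dmamap_reserve_pages() returns EINPROGRESS, the map is
 * queued on bounce_map_waitinglist, and busdma_swi() re-runs the load and
 * invokes the callback once free_bounce_page() releases pages.  The
 * mydev_* names are hypothetical.
 */
#if 0	/* illustrative only */
static void
mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct mydev_softc *sc = arg;

        if (error != 0)
                return;
        /* Program the single segment into the (hypothetical) device. */
        mydev_set_dma_addr(sc, segs[0].ds_addr, segs[0].ds_len);
}

static int
mydev_load_buf(struct mydev_softc *sc, void *buf, bus_size_t len)
{
        int error;

        error = bus_dmamap_load(sc->buf_dmat, sc->buf_map, buf, len,
            mydev_load_cb, sc, BUS_DMA_WAITOK);
        if (error == EINPROGRESS)
                error = 0;      /* callback runs later from busdma_swi() */
        return (error);
}
#endif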