1 /*-
2  * Copyright (c) 1997, 1998 Justin T. Gibbs.
3  * Copyright (c) 2015-2016 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Andrew Turner
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Portions of this software were developed by Semihalf
10  * under sponsorship of the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions, and the following disclaimer,
17  *    without modification, immediately at the beginning of the file.
18  * 2. The name of the author may not be used to endorse or promote products
19  *    derived from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
40 #include <sys/bus.h>
41 #include <sys/interrupt.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/lock.h>
45 #include <sys/proc.h>
46 #include <sys/memdesc.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 #include <sys/uio.h>
50
51 #include <vm/vm.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_map.h>
56
57 #include <machine/atomic.h>
58 #include <machine/bus.h>
59 #include <machine/md_var.h>
60 #include <arm64/include/bus_dma_impl.h>
61
62 #define MAX_BPAGES 4096
63
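/*
 * Tag-level bounce flags: BF_COULD_BOUNCE marks a tag whose constraints
 * (exclusion window, alignment, or a filter) may force transfers through
 * bounce pages; BF_MIN_ALLOC_COMP records that the tag's initial
 * bounce-page allocation has been performed; BF_KMEM_ALLOC records that
 * dmamem came from kmem_alloc_attr()/kmem_alloc_contig() rather than
 * malloc(9); BF_COHERENT marks a tag whose maps need no explicit cache
 * maintenance.
 */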
64 enum {
65         BF_COULD_BOUNCE         = 0x01,
66         BF_MIN_ALLOC_COMP       = 0x02,
67         BF_KMEM_ALLOC           = 0x04,
68         BF_COHERENT             = 0x10,
69 };
70
71 struct bounce_zone;
72
73 struct bus_dma_tag {
74         struct bus_dma_tag_common common;
75         int                     map_count;
76         int                     bounce_flags;
77         bus_dma_segment_t       *segments;
78         struct bounce_zone      *bounce_zone;
79 };
80
81 struct bounce_page {
82         vm_offset_t     vaddr;          /* kva of bounce buffer */
83         bus_addr_t      busaddr;        /* Physical address */
84         vm_offset_t     datavaddr;      /* kva of client data */
85         vm_page_t       datapage;       /* physical page of client data */
86         vm_offset_t     dataoffs;       /* page offset of client data */
87         bus_size_t      datacount;      /* client data count */
88         STAILQ_ENTRY(bounce_page) links;
89 };
90
91 int busdma_swi_pending;
92
93 struct bounce_zone {
94         STAILQ_ENTRY(bounce_zone) links;
95         STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
96         int             total_bpages;
97         int             free_bpages;
98         int             reserved_bpages;
99         int             active_bpages;
100         int             total_bounced;
101         int             total_deferred;
102         int             map_count;
103         bus_size_t      alignment;
104         bus_addr_t      lowaddr;
105         char            zoneid[8];
106         char            lowaddrid[20];
107         struct sysctl_ctx_list sysctl_tree;
108         struct sysctl_oid *sysctl_tree_top;
109 };
110
111 static struct mtx bounce_lock;
112 static int total_bpages;
113 static int busdma_zonecount;
114 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
115
116 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
117     "Busdma parameters");
118 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
119            "Total bounce pages");
120
121 struct sync_list {
122         vm_offset_t     vaddr;          /* kva of client data */
123         bus_addr_t      paddr;          /* physical address */
124         vm_page_t       pages;          /* starting page of client data */
125         bus_size_t      datacount;      /* client data count */
126 };
127
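/*
 * Each map records in slist[] the non-coherent segments it has loaded,
 * so that bounce_bus_dmamap_sync() can later perform cache maintenance
 * on exactly the memory the device will access.
 */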
128 struct bus_dmamap {
129         struct bp_list         bpages;
130         int                    pagesneeded;
131         int                    pagesreserved;
132         bus_dma_tag_t          dmat;
133         struct memdesc         mem;
134         bus_dmamap_callback_t *callback;
135         void                  *callback_arg;
136         STAILQ_ENTRY(bus_dmamap) links;
137         u_int                   flags;
138 #define DMAMAP_COHERENT         (1 << 0)
139 #define DMAMAP_FROM_DMAMEM      (1 << 1)
140         int                     sync_count;
141         struct sync_list        slist[];
142 };
143
144 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
145 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
146
147 static void init_bounce_pages(void *dummy);
148 static int alloc_bounce_zone(bus_dma_tag_t dmat);
149 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
150 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
151     int commit);
152 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
153     vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
154 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
155 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
156 static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
157     bus_size_t buflen, int *pagesneeded);
158 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
159     pmap_t pmap, void *buf, bus_size_t buflen, int flags);
160 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
161     vm_paddr_t buf, bus_size_t buflen, int flags);
162 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
163     int flags);
164
165 static bool
166 might_bounce(bus_dma_tag_t dmat)
167 {
168
169         if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0)
170                 return (true);
171
172         return (false);
173 }
174
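/*
 * Unlike might_bounce() above, which is a cheap tag-wide test used to
 * skip bounce accounting altogether, must_bounce() checks the tag's
 * constraints (including any filter chain) against one specific
 * physical address.
 */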
175 static bool
176 must_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
177 {
178
179         if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0 &&
180             bus_dma_run_filter(&dmat->common, paddr))
181                 return (true);
182
183         return (false);
184 }
185
186 /*
187  * Allocate a device-specific dma_tag.
188  */
189 static int
190 bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
191     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
192     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
193     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
194     void *lockfuncarg, bus_dma_tag_t *dmat)
195 {
196         bus_dma_tag_t newtag;
197         int error;
198
199         *dmat = NULL;
200         error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
201             NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
202             maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
203             sizeof (struct bus_dma_tag), (void **)&newtag);
204         if (error != 0)
205                 return (error);
206
207         newtag->common.impl = &bus_dma_bounce_impl;
208         newtag->map_count = 0;
209         newtag->segments = NULL;
210
211         if ((flags & BUS_DMA_COHERENT) != 0)
212                 newtag->bounce_flags |= BF_COHERENT;
213
214         if (parent != NULL) {
215                 if ((newtag->common.filter != NULL ||
216                     (parent->bounce_flags & BF_COULD_BOUNCE) != 0))
217                         newtag->bounce_flags |= BF_COULD_BOUNCE;
218
219                 /* Copy some flags from the parent */
220                 newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
221         }
222
223         if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
224             newtag->common.alignment > 1)
225                 newtag->bounce_flags |= BF_COULD_BOUNCE;
226
227         if (((newtag->bounce_flags & BF_COULD_BOUNCE) != 0) &&
228             (flags & BUS_DMA_ALLOCNOW) != 0) {
229                 struct bounce_zone *bz;
230
231                 /* Must bounce */
232                 if ((error = alloc_bounce_zone(newtag)) != 0) {
233                         free(newtag, M_DEVBUF);
234                         return (error);
235                 }
236                 bz = newtag->bounce_zone;
237
238                 if (ptoa(bz->total_bpages) < maxsize) {
239                         int pages;
240
241                         pages = atop(round_page(maxsize)) - bz->total_bpages;
242
243                         /* Add pages to our bounce pool */
244                         if (alloc_bounce_pages(newtag, pages) < pages)
245                                 error = ENOMEM;
246                 }
247                 /* Performed initial allocation */
248                 newtag->bounce_flags |= BF_MIN_ALLOC_COMP;
249         } else
250                 error = 0;
251
252         if (error != 0)
253                 free(newtag, M_DEVBUF);
254         else
255                 *dmat = newtag;
256         CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
257             __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
258             error);
259         return (error);
260 }
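
/*
 * Illustrative use of the tag_create method above (a sketch, not part
 * of this file): a driver for a device that can only address the low
 * 4GiB might create a tag as follows, where "sc" and "sc->dma_tag" are
 * hypothetical softc fields:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
 *	    1, 0,			(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(filter, filterarg)
 *	    MCLBYTES, 1, MCLBYTES,	(maxsize, nsegments, maxsegsz)
 *	    0, NULL, NULL, &sc->dma_tag);
 *
 * With lowaddr below the top of physical memory, the code above sets
 * BF_COULD_BOUNCE, so maps created from this tag may use bounce pages.
 */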
261
262 static int
263 bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
264 {
265         bus_dma_tag_t dmat_copy, parent;
266         int error;
267
268         error = 0;
269         dmat_copy = dmat;
270
271         if (dmat != NULL) {
272                 if (dmat->map_count != 0) {
273                         error = EBUSY;
274                         goto out;
275                 }
276                 while (dmat != NULL) {
277                         parent = (bus_dma_tag_t)dmat->common.parent;
278                         atomic_subtract_int(&dmat->common.ref_count, 1);
279                         if (dmat->common.ref_count == 0) {
280                                 if (dmat->segments != NULL)
281                                         free(dmat->segments, M_DEVBUF);
282                                 free(dmat, M_DEVBUF);
283                                 /*
284                                  * Last reference count, so
285                                  * release our reference
286                                  * count on our parent.
287                                  */
288                                 dmat = parent;
289                         } else
290                                 dmat = NULL;
291                 }
292         }
293 out:
294         CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
295         return (error);
296 }
297
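/*
 * Report whether a physically contiguous buffer would be mapped
 * one-to-one, i.e. loading it could never require bounce pages, so
 * that callers may take a fast path.
 */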
298 static bool
299 bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
300 {
301
302         if (!might_bounce(dmat))
303                 return (true);
304         return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
305 }
306
307 static bus_dmamap_t
308 alloc_dmamap(bus_dma_tag_t dmat, int flags)
309 {
310         u_long mapsize;
311         bus_dmamap_t map;
312
313         mapsize = sizeof(*map);
314         mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
315         map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
316         if (map == NULL)
317                 return (NULL);
318
319         /* Initialize the new map */
320         STAILQ_INIT(&map->bpages);
321
322         return (map);
323 }
324
325 /*
326  * Allocate a handle for mapping from kva/uva/physical
327  * address space into bus device space.
328  */
329 static int
330 bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
331 {
332         struct bounce_zone *bz;
333         int error, maxpages, pages;
334
335         error = 0;
336
337         if (dmat->segments == NULL) {
338                 dmat->segments = (bus_dma_segment_t *)malloc(
339                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
340                     M_DEVBUF, M_NOWAIT);
341                 if (dmat->segments == NULL) {
342                         CTR3(KTR_BUSDMA, "%s: tag %p error %d",
343                             __func__, dmat, ENOMEM);
344                         return (ENOMEM);
345                 }
346         }
347
348         *mapp = alloc_dmamap(dmat, M_NOWAIT);
349         if (*mapp == NULL) {
350                 CTR3(KTR_BUSDMA, "%s: tag %p error %d",
351                     __func__, dmat, ENOMEM);
352                 return (ENOMEM);
353         }
354
355         /*
356          * Bouncing might be required if the driver asks for an active
357          * exclusion region, a data alignment that is stricter than 1, and/or
358          * an active address boundary.
359          */
360         if (dmat->bounce_flags & BF_COULD_BOUNCE) {
361                 /* Must bounce */
362                 if (dmat->bounce_zone == NULL) {
363                         if ((error = alloc_bounce_zone(dmat)) != 0) {
364                                 free(*mapp, M_DEVBUF);
365                                 return (error);
366                         }
367                 }
368                 bz = dmat->bounce_zone;
369
370                 /*
371                  * Attempt to add pages to our pool on a per-instance
372                  * basis up to a sane limit.
373                  */
374                 if (dmat->common.alignment > 1)
375                         maxpages = MAX_BPAGES;
376                 else
377                         maxpages = MIN(MAX_BPAGES, Maxmem -
378                             atop(dmat->common.lowaddr));
379                 if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 ||
380                     (bz->map_count > 0 && bz->total_bpages < maxpages)) {
381                         pages = MAX(atop(dmat->common.maxsize), 1);
382                         pages = MIN(maxpages - bz->total_bpages, pages);
383                         pages = MAX(pages, 1);
384                         if (alloc_bounce_pages(dmat, pages) < pages)
385                                 error = ENOMEM;
386                         if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP)
387                             == 0) {
388                                 if (error == 0) {
389                                         dmat->bounce_flags |=
390                                             BF_MIN_ALLOC_COMP;
391                                 }
392                         } else
393                                 error = 0;
394                 }
395                 bz->map_count++;
396         }
397         if (error == 0) {
398                 dmat->map_count++;
399                 if ((dmat->bounce_flags & BF_COHERENT) != 0)
400                         (*mapp)->flags |= DMAMAP_COHERENT;
401         } else {
402                 free(*mapp, M_DEVBUF);
403         }
404         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
405             __func__, dmat, dmat->common.flags, error);
406         return (error);
407 }
408
409 /*
410  * Destroy a handle for mapping from kva/uva/physical
411  * address space into bus device space.
412  */
413 static int
414 bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
415 {
416
417         /* Check we are destroying the correct map type */
418         if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
419                 panic("bounce_bus_dmamap_destroy: Invalid map freed\n");
420
421         if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
422                 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
423                 return (EBUSY);
424         }
425         if (dmat->bounce_zone)
426                 dmat->bounce_zone->map_count--;
427         free(map, M_DEVBUF);
428         dmat->map_count--;
429         CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
430         return (0);
431 }
432
433 /*
434  * Allocate a piece of memory that can be efficiently mapped into
435  * bus device space based on the constraints listed in the dma tag.
436  * A dmamap for use with dmamap_load is also allocated.
437  */
438 static int
439 bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
440     bus_dmamap_t *mapp)
441 {
442         /*
443          * XXX ARM64TODO:
444          * This bus_dma implementation requires an IO-coherent architecture.
445          * If IO-coherency is not guaranteed, the BUS_DMA_COHERENT flag has
446          * to be implemented using non-cacheable memory.
447          */
448
449         vm_memattr_t attr;
450         int mflags;
451
452         if (flags & BUS_DMA_NOWAIT)
453                 mflags = M_NOWAIT;
454         else
455                 mflags = M_WAITOK;
456
457         if (dmat->segments == NULL) {
458                 dmat->segments = (bus_dma_segment_t *)malloc(
459                     sizeof(bus_dma_segment_t) * dmat->common.nsegments,
460                     M_DEVBUF, mflags);
461                 if (dmat->segments == NULL) {
462                         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
463                             __func__, dmat, dmat->common.flags, ENOMEM);
464                         return (ENOMEM);
465                 }
466         }
467         if (flags & BUS_DMA_ZERO)
468                 mflags |= M_ZERO;
469         if (flags & BUS_DMA_NOCACHE)
470                 attr = VM_MEMATTR_UNCACHEABLE;
471         else if ((flags & BUS_DMA_COHERENT) != 0 &&
472             (dmat->bounce_flags & BF_COHERENT) == 0)
473                 /*
474                  * If we have a non-coherent tag, and are trying to allocate
475                  * a coherent block of memory it needs to be uncached.
476                  */
477                 attr = VM_MEMATTR_UNCACHEABLE;
478         else
479                 attr = VM_MEMATTR_DEFAULT;
480
481         /*
482          * Create the map, but don't set the could-bounce flag as
483          * this allocation should never bounce.
484          */
485         *mapp = alloc_dmamap(dmat, mflags);
486         if (*mapp == NULL) {
487                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
488                     __func__, dmat, dmat->common.flags, ENOMEM);
489                 return (ENOMEM);
490         }
491
492         /*
493          * Mark the map as coherent if we used uncacheable memory or the
494          * tag was already marked as coherent.
495          */
496         if (attr == VM_MEMATTR_UNCACHEABLE ||
497             (dmat->bounce_flags & BF_COHERENT) != 0)
498                 (*mapp)->flags |= DMAMAP_COHERENT;
499
500         (*mapp)->flags |= DMAMAP_FROM_DMAMEM;
501
502         /*
503          * Allocate the buffer from the malloc(9) allocator if...
504          *  - It's small enough to fit into a single power of two sized bucket.
505          *  - The alignment is less than or equal to the maximum size.
506          *  - The low address requirement is fulfilled.
507          * else allocate non-contiguous pages if...
508          *  - The number of pages the allocation could span doesn't
509          *    exceed nsegments, even when the maximum segment size is
510          *    less than PAGE_SIZE.
511          *  - The alignment constraint isn't larger than a page boundary.
512          *  - There are no boundary-crossing constraints.
513          * else allocate a block of contiguous pages because one or more of the
514          * constraints is something that only the contig allocator can fulfill.
515          *
516          * NOTE: The (dmat->common.alignment <= dmat->maxsize) check
517          * below is just a quick hack. The exact alignment guarantees
518          * of malloc(9) need to be nailed down, and the code below
519          * should be rewritten to take that into account.
520          *
521          * In the meantime warn the user if malloc gets it wrong.
522          */
523         if ((dmat->common.maxsize <= PAGE_SIZE) &&
524            (dmat->common.alignment <= dmat->common.maxsize) &&
525             dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
526             attr == VM_MEMATTR_DEFAULT) {
527                 *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
528         } else if (dmat->common.nsegments >=
529             howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
530             dmat->common.alignment <= PAGE_SIZE &&
531             (dmat->common.boundary % PAGE_SIZE) == 0) {
532                 /* Page-based multi-segment allocations allowed */
533                 *vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags,
534                     0ul, dmat->common.lowaddr, attr);
535                 dmat->bounce_flags |= BF_KMEM_ALLOC;
536         } else {
537                 *vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
538                     0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
539                     dmat->common.alignment : 1ul, dmat->common.boundary, attr);
540                 dmat->bounce_flags |= BF_KMEM_ALLOC;
541         }
542         if (*vaddr == NULL) {
543                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
544                     __func__, dmat, dmat->common.flags, ENOMEM);
545                 free(*mapp, M_DEVBUF);
546                 return (ENOMEM);
547         } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
548                 printf("bus_dmamem_alloc failed to align memory properly.\n");
549         }
550         dmat->map_count++;
551         CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
552             __func__, dmat, dmat->common.flags, 0);
553         return (0);
554 }
555
556 /*
557  * Free a piece of memory and its associated dmamap that were allocated
558  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
559  */
560 static void
561 bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
562 {
563
564         /*
565          * Check the map came from bounce_bus_dmamem_alloc.  BF_KMEM_ALLOC
566          * is clear if the memory was allocated with malloc(9) and set if
567          * it came from kmem_alloc_attr() or kmem_alloc_contig().
568          */
569         if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
570                 panic("bus_dmamem_free: Invalid map freed\n");
571         if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
572                 free(vaddr, M_DEVBUF);
573         else
574                 kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
575         free(map, M_DEVBUF);
576         dmat->map_count--;
577         CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
578             dmat->bounce_flags);
579 }
580
581 static bool
582 _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
583     int *pagesneeded)
584 {
585         bus_addr_t curaddr;
586         bus_size_t sgsize;
587         int count;
588
589         /*
590          * Count the number of bounce pages needed in order to
591          * complete this transfer
592          */
593         count = 0;
594         curaddr = buf;
595         while (buflen != 0) {
596                 sgsize = MIN(buflen, dmat->common.maxsegsz);
597                 if (must_bounce(dmat, curaddr)) {
598                         sgsize = MIN(sgsize,
599                             PAGE_SIZE - (curaddr & PAGE_MASK));
600                         if (pagesneeded == NULL)
601                                 return (true);
602                         count++;
603                 }
604                 curaddr += sgsize;
605                 buflen -= sgsize;
606         }
607
608         if (pagesneeded != NULL)
609                 *pagesneeded = count;
610         return (count != 0);
611 }
612
613 static void
614 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
615     bus_size_t buflen, int flags)
616 {
617
618         if (map->pagesneeded == 0) {
619                 _bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
620                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
621         }
622 }
623
624 static void
625 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
626     void *buf, bus_size_t buflen, int flags)
627 {
628         vm_offset_t vaddr;
629         vm_offset_t vendaddr;
630         bus_addr_t paddr;
631         bus_size_t sg_len;
632
633         if (map->pagesneeded == 0) {
634                 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
635                     "alignment= %d", dmat->common.lowaddr,
636                     ptoa((vm_paddr_t)Maxmem),
637                     dmat->common.boundary, dmat->common.alignment);
638                 CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
639                     map->pagesneeded);
640                 /*
641                  * Count the number of bounce pages
642                  * needed in order to complete this transfer
643                  */
644                 vaddr = (vm_offset_t)buf;
645                 vendaddr = (vm_offset_t)buf + buflen;
646
647                 while (vaddr < vendaddr) {
648                         sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
649                         if (pmap == kernel_pmap)
650                                 paddr = pmap_kextract(vaddr);
651                         else
652                                 paddr = pmap_extract(pmap, vaddr);
653                         if (must_bounce(dmat, paddr)) {
654                                 sg_len = roundup2(sg_len,
655                                     dmat->common.alignment);
656                                 map->pagesneeded++;
657                         }
658                         vaddr += sg_len;
659                 }
660                 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
661         }
662 }
663
664 static int
665 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
666 {
667
668         /* Reserve Necessary Bounce Pages */
669         mtx_lock(&bounce_lock);
670         if (flags & BUS_DMA_NOWAIT) {
671                 if (reserve_bounce_pages(dmat, map, 0) != 0) {
672                         mtx_unlock(&bounce_lock);
673                         return (ENOMEM);
674                 }
675         } else {
676                 if (reserve_bounce_pages(dmat, map, 1) != 0) {
677                         /* Queue us for resources */
678                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
679                         mtx_unlock(&bounce_lock);
680                         return (EINPROGRESS);
681                 }
682         }
683         mtx_unlock(&bounce_lock);
684
685         return (0);
686 }
687
688 /*
689  * Add a single contiguous physical range to the segment list.
690  */
691 static bus_size_t
692 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
693     bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
694 {
695         bus_addr_t baddr, bmask;
696         int seg;
697
698         /*
699          * Make sure we don't cross any boundaries.
700          */
701         bmask = ~(dmat->common.boundary - 1);
702         if (dmat->common.boundary > 0) {
703                 baddr = (curaddr + dmat->common.boundary) & bmask;
704                 if (sgsize > (baddr - curaddr))
705                         sgsize = (baddr - curaddr);
706         }
707
708         /*
709          * Insert chunk into a segment, coalescing with
710          * previous segment if possible.
711          */
712         seg = *segp;
713         if (seg == -1) {
714                 seg = 0;
715                 segs[seg].ds_addr = curaddr;
716                 segs[seg].ds_len = sgsize;
717         } else {
718                 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
719                     (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
720                     (dmat->common.boundary == 0 ||
721                      (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
722                         segs[seg].ds_len += sgsize;
723                 else {
724                         if (++seg >= dmat->common.nsegments)
725                                 return (0);
726                         segs[seg].ds_addr = curaddr;
727                         segs[seg].ds_len = sgsize;
728                 }
729         }
730         *segp = seg;
731         return (sgsize);
732 }
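/*
 * Example of the boundary clamp above: with boundary = 0x1000 and
 * curaddr = 0x2e00, baddr becomes 0x3000, so a 0x400-byte chunk is
 * clamped to 0x200 bytes and the remainder starts a new segment at
 * the 0x3000 boundary.
 */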
733
734 /*
735  * Utility function to load a physical buffer.  segp contains
736  * the starting segment on entry, and the ending segment on exit.
737  */
738 static int
739 bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
740     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
741     int *segp)
742 {
743         struct sync_list *sl;
744         bus_size_t sgsize;
745         bus_addr_t curaddr, sl_end;
746         int error;
747
748         if (segs == NULL)
749                 segs = dmat->segments;
750
751         if (might_bounce(dmat)) {
752                 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
753                 if (map->pagesneeded != 0) {
754                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
755                         if (error)
756                                 return (error);
757                 }
758         }
759
760         sl = map->slist + map->sync_count - 1;
761         sl_end = 0;
762
763         while (buflen > 0) {
764                 curaddr = buf;
765                 sgsize = MIN(buflen, dmat->common.maxsegsz);
766                 if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
767                         sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
768                         curaddr = add_bounce_page(dmat, map, 0, curaddr,
769                             sgsize);
770                 } else if ((map->flags & DMAMAP_COHERENT) == 0) {
771                         if (map->sync_count > 0)
772                                 sl_end = sl->paddr + sl->datacount;
773
774                         if (map->sync_count == 0 || curaddr != sl_end) {
775                                 if (++map->sync_count > dmat->common.nsegments)
776                                         break;
777                                 sl++;
778                                 sl->vaddr = 0;
779                                 sl->paddr = curaddr;
780                                 sl->datacount = sgsize;
781                                 sl->pages = PHYS_TO_VM_PAGE(curaddr);
782                                 KASSERT(sl->pages != NULL,
783                                     ("%s: page at PA:0x%08lx is not in "
784                                     "vm_page_array", __func__, curaddr));
785                         } else
786                                 sl->datacount += sgsize;
787                 }
788                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
789                     segp);
790                 if (sgsize == 0)
791                         break;
792                 buf += sgsize;
793                 buflen -= sgsize;
794         }
795
796         /*
797          * Did we fit?
798          */
799         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
800 }
801
802 /*
803  * Utility function to load a linear buffer.  segp contains
804  * the starting segment on entry, and the ending segment on exit.
805  */
806 static int
807 bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
808     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
809     int *segp)
810 {
811         struct sync_list *sl;
812         bus_size_t sgsize, max_sgsize;
813         bus_addr_t curaddr, sl_pend;
814         vm_offset_t kvaddr, vaddr, sl_vend;
815         int error;
816
817         if (segs == NULL)
818                 segs = dmat->segments;
819
820         if (might_bounce(dmat)) {
821                 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
822                 if (map->pagesneeded != 0) {
823                         error = _bus_dmamap_reserve_pages(dmat, map, flags);
824                         if (error)
825                                 return (error);
826                 }
827         }
828
829         sl = map->slist + map->sync_count - 1;
830         vaddr = (vm_offset_t)buf;
831         sl_pend = 0;
832         sl_vend = 0;
833
834         while (buflen > 0) {
835                 /*
836                  * Get the physical address for this segment.
837                  */
838                 if (pmap == kernel_pmap) {
839                         curaddr = pmap_kextract(vaddr);
840                         kvaddr = vaddr;
841                 } else {
842                         curaddr = pmap_extract(pmap, vaddr);
843                         kvaddr = 0;
844                 }
845
846                 /*
847                  * Compute the segment size, and adjust counts.
848                  */
849                 max_sgsize = MIN(buflen, dmat->common.maxsegsz);
850                 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
851                 if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
852                         sgsize = roundup2(sgsize, dmat->common.alignment);
853                         sgsize = MIN(sgsize, max_sgsize);
854                         curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
855                             sgsize);
856                 } else if ((map->flags & DMAMAP_COHERENT) == 0) {
857                         sgsize = MIN(sgsize, max_sgsize);
858                         if (map->sync_count > 0) {
859                                 sl_pend = sl->paddr + sl->datacount;
860                                 sl_vend = sl->vaddr + sl->datacount;
861                         }
862
863                         if (map->sync_count == 0 ||
864                             (kvaddr != 0 && kvaddr != sl_vend) ||
865                             (curaddr != sl_pend)) {
866                                 if (++map->sync_count > dmat->common.nsegments)
867                                         goto cleanup;
868                                 sl++;
869                                 sl->vaddr = kvaddr;
870                                 sl->paddr = curaddr;
871                                 if (kvaddr != 0) {
872                                         sl->pages = NULL;
873                                 } else {
874                                         sl->pages = PHYS_TO_VM_PAGE(curaddr);
875                                         KASSERT(sl->pages != NULL,
876                                             ("%s: page at PA:0x%08lx is not "
877                                             "in vm_page_array", __func__,
878                                             curaddr));
879                                 }
880                                 sl->datacount = sgsize;
881                         } else
882                                 sl->datacount += sgsize;
883                 } else {
884                         sgsize = MIN(sgsize, max_sgsize);
885                 }
886                 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
887                     segp);
888                 if (sgsize == 0)
889                         break;
890                 vaddr += sgsize;
891                 buflen -= sgsize;
892         }
893
894 cleanup:
895         /*
896          * Did we fit?
897          */
898         return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
899 }
900
901 static void
902 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
903     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
904 {
905
906         map->mem = *mem;
907         map->dmat = dmat;
908         map->callback = callback;
909         map->callback_arg = callback_arg;
910 }
911
912 static bus_dma_segment_t *
913 bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
914     bus_dma_segment_t *segs, int nsegs, int error)
915 {
916
917         if (segs == NULL)
918                 segs = dmat->segments;
919         return (segs);
920 }
921
922 /*
923  * Release the mapping held by map.
924  */
925 static void
926 bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
927 {
928         struct bounce_page *bpage;
929
930         while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
931                 STAILQ_REMOVE_HEAD(&map->bpages, links);
932                 free_bounce_page(dmat, bpage);
933         }
934
935         map->sync_count = 0;
936 }
937
938 static void
939 dma_preread_safe(vm_offset_t va, vm_size_t size)
940 {
941         /*
942          * Write back any partial cachelines immediately before and
943          * after the DMA region.
944          */
945         if (va & (dcache_line_size - 1))
946                 cpu_dcache_wb_range(va, 1);
947         if ((va + size) & (dcache_line_size - 1))
948                 cpu_dcache_wb_range(va + size, 1);
949
950         cpu_dcache_inv_range(va, size);
951 }
952
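/*
 * Perform the dcache maintenance required by op on one sync-list
 * entry.  Entries loaded by physical address or from a user address
 * space carry no kernel VA (sl->vaddr == 0); these are walked page by
 * page through a temporary pmap_quick_enter_page() mapping.
 */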
953 static void
954 dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
955 {
956         uint32_t len, offset;
957         vm_page_t m;
958         vm_paddr_t pa;
959         vm_offset_t va, tempva;
960         bus_size_t size;
961
962         offset = sl->paddr & PAGE_MASK;
963         m = sl->pages;
964         size = sl->datacount;
965         pa = sl->paddr;
966
967         for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
968                 tempva = 0;
969                 if (sl->vaddr == 0) {
970                         len = min(PAGE_SIZE - offset, size);
971                         tempva = pmap_quick_enter_page(m);
972                         va = tempva | offset;
973                         KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
974                             ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
975                             VM_PAGE_TO_PHYS(m) | offset, pa));
976                 } else {
977                         len = sl->datacount;
978                         va = sl->vaddr;
979                 }
980
981                 switch (op) {
982                 case BUS_DMASYNC_PREWRITE:
983                 case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
984                         cpu_dcache_wb_range(va, len);
985                         break;
986                 case BUS_DMASYNC_PREREAD:
987                         /*
988                          * An mbuf may start in the middle of a cacheline. There
989                          * will be no cpu writes to the beginning of that line
990                          * (which contains the mbuf header) while dma is in
991                          * progress.  Handle that case by doing a writeback of
992                          * just the first cacheline before invalidating the
993                          * overall buffer.  Any mbuf in a chain may have this
994                          * misalignment.  Buffers which are not mbufs bounce if
995                          * they are not aligned to a cacheline.
996                          */
997                         dma_preread_safe(va, len);
998                         break;
999                 case BUS_DMASYNC_POSTREAD:
1000                 case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
1001                         cpu_dcache_inv_range(va, len);
1002                         break;
1003                 default:
1004                         panic("unsupported combination of sync operations: "
1005                               "0x%08x\n", op);
1006                 }
1007
1008                 if (tempva != 0)
1009                         pmap_quick_remove_page(tempva);
1010         }
1011 }
1012
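/*
 * Sync strategy: BUS_DMASYNC_POSTWRITE is a no-op.  For bounced pages,
 * PREWRITE copies the client data into the bounce buffer (writing it
 * back to memory on non-coherent maps), PREREAD makes the bounce
 * buffer safe for the device to write, and POSTREAD invalidates the
 * cache and copies the device's data back out.  Non-coherent segments
 * that were not bounced get dcache maintenance via the sync list, and
 * the dsb(sy) barriers order the copies against the DMA itself.
 */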
1013 static void
1014 bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
1015     bus_dmasync_op_t op)
1016 {
1017         struct bounce_page *bpage;
1018         struct sync_list *sl, *end;
1019         vm_offset_t datavaddr, tempvaddr;
1020
1021         if (op == BUS_DMASYNC_POSTWRITE)
1022                 return;
1023
1024         if ((op & BUS_DMASYNC_POSTREAD) != 0) {
1025                 /*
1026                  * Wait for any DMA operations to complete before the bcopy.
1027                  */
1028                 dsb(sy);
1029         }
1030
1031         if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
1032                 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
1033                     "performing bounce", __func__, dmat, dmat->common.flags,
1034                     op);
1035
1036                 if ((op & BUS_DMASYNC_PREWRITE) != 0) {
1037                         while (bpage != NULL) {
1038                                 tempvaddr = 0;
1039                                 datavaddr = bpage->datavaddr;
1040                                 if (datavaddr == 0) {
1041                                         tempvaddr = pmap_quick_enter_page(
1042                                             bpage->datapage);
1043                                         datavaddr = tempvaddr | bpage->dataoffs;
1044                                 }
1045
1046                                 bcopy((void *)datavaddr,
1047                                     (void *)bpage->vaddr, bpage->datacount);
1048                                 if (tempvaddr != 0)
1049                                         pmap_quick_remove_page(tempvaddr);
1050                                 if ((map->flags & DMAMAP_COHERENT) == 0)
1051                                         cpu_dcache_wb_range(bpage->vaddr,
1052                                             bpage->datacount);
1053                                 bpage = STAILQ_NEXT(bpage, links);
1054                         }
1055                         dmat->bounce_zone->total_bounced++;
1056                 } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
1057                         while (bpage != NULL) {
1058                                 if ((map->flags & DMAMAP_COHERENT) == 0)
1059                                         cpu_dcache_wbinv_range(bpage->vaddr,
1060                                             bpage->datacount);
1061                                 bpage = STAILQ_NEXT(bpage, links);
1062                         }
1063                 }
1064
1065                 if ((op & BUS_DMASYNC_POSTREAD) != 0) {
1066                         while (bpage != NULL) {
1067                                 if ((map->flags & DMAMAP_COHERENT) == 0)
1068                                         cpu_dcache_inv_range(bpage->vaddr,
1069                                             bpage->datacount);
1070                                 tempvaddr = 0;
1071                                 datavaddr = bpage->datavaddr;
1072                                 if (datavaddr == 0) {
1073                                         tempvaddr = pmap_quick_enter_page(
1074                                             bpage->datapage);
1075                                         datavaddr = tempvaddr | bpage->dataoffs;
1076                                 }
1077
1078                                 bcopy((void *)bpage->vaddr,
1079                                     (void *)datavaddr, bpage->datacount);
1080
1081                                 if (tempvaddr != 0)
1082                                         pmap_quick_remove_page(tempvaddr);
1083                                 bpage = STAILQ_NEXT(bpage, links);
1084                         }
1085                         dmat->bounce_zone->total_bounced++;
1086                 }
1087         }
1088
1089         /*
1090          * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
1091          */
1092         if (map->sync_count != 0) {
1093                 sl = &map->slist[0];
1094                 end = &map->slist[map->sync_count];
1095                 CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
1096                     "performing sync", __func__, dmat, op);
1097
1098                 for ( ; sl != end; ++sl)
1099                         dma_dcache_sync(sl, op);
1100         }
1101
1102         if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
1103                 /*
1104                  * Wait for the bcopy to complete before any DMA operations.
1105                  */
1106                 dsb(sy);
1107         }
1108 }
1109
1110 static void
1111 init_bounce_pages(void *dummy __unused)
1112 {
1113
1114         total_bpages = 0;
1115         STAILQ_INIT(&bounce_zone_list);
1116         STAILQ_INIT(&bounce_map_waitinglist);
1117         STAILQ_INIT(&bounce_map_callbacklist);
1118         mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1119 }
1120 SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1121
1122 static struct sysctl_ctx_list *
1123 busdma_sysctl_tree(struct bounce_zone *bz)
1124 {
1125
1126         return (&bz->sysctl_tree);
1127 }
1128
1129 static struct sysctl_oid *
1130 busdma_sysctl_tree_top(struct bounce_zone *bz)
1131 {
1132
1133         return (bz->sysctl_tree_top);
1134 }
1135
1136 static int
1137 alloc_bounce_zone(bus_dma_tag_t dmat)
1138 {
1139         struct bounce_zone *bz;
1140
1141         /* Check to see if we already have a suitable zone */
1142         STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1143                 if ((dmat->common.alignment <= bz->alignment) &&
1144                     (dmat->common.lowaddr >= bz->lowaddr)) {
1145                         dmat->bounce_zone = bz;
1146                         return (0);
1147                 }
1148         }
1149
1150         if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1151             M_NOWAIT | M_ZERO)) == NULL)
1152                 return (ENOMEM);
1153
1154         STAILQ_INIT(&bz->bounce_page_list);
1155         bz->free_bpages = 0;
1156         bz->reserved_bpages = 0;
1157         bz->active_bpages = 0;
1158         bz->lowaddr = dmat->common.lowaddr;
1159         bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
1160         bz->map_count = 0;
1161         snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1162         busdma_zonecount++;
1163         snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1164         STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1165         dmat->bounce_zone = bz;
1166
1167         sysctl_ctx_init(&bz->sysctl_tree);
1168         bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1169             SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1170             CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
1171         if (bz->sysctl_tree_top == NULL) {
1172                 sysctl_ctx_free(&bz->sysctl_tree);
1173                 return (0);     /* XXX error code? */
1174         }
1175
1176         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1177             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1178             "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1179             "Total bounce pages");
1180         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1181             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1182             "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1183             "Free bounce pages");
1184         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1185             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1186             "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1187             "Reserved bounce pages");
1188         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1189             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1190             "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1191             "Active bounce pages");
1192         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1193             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1194             "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1195             "Total bounce requests");
1196         SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1197             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1198             "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1199             "Total bounce requests that were deferred");
1200         SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1201             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1202             "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1203         SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
1204             SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1205             "alignment", CTLFLAG_RD, &bz->alignment, "");
1206
1207         return (0);
1208 }
1209
1210 static int
1211 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1212 {
1213         struct bounce_zone *bz;
1214         int count;
1215
1216         bz = dmat->bounce_zone;
1217         count = 0;
1218         while (numpages > 0) {
1219                 struct bounce_page *bpage;
1220
1221                 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1222                                                      M_NOWAIT | M_ZERO);
1223
1224                 if (bpage == NULL)
1225                         break;
1226                 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1227                     M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
1228                 if (bpage->vaddr == 0) {
1229                         free(bpage, M_DEVBUF);
1230                         break;
1231                 }
1232                 bpage->busaddr = pmap_kextract(bpage->vaddr);
1233                 mtx_lock(&bounce_lock);
1234                 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1235                 total_bpages++;
1236                 bz->total_bpages++;
1237                 bz->free_bpages++;
1238                 mtx_unlock(&bounce_lock);
1239                 count++;
1240                 numpages--;
1241         }
1242         return (count);
1243 }
1244
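/*
 * Try to reserve the bounce pages a map still needs from its zone's
 * free list.  Returns the remaining shortfall (0 once the reservation
 * is complete); when commit is zero, nothing is reserved unless the
 * whole request can be satisfied at once.
 */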
1245 static int
1246 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1247 {
1248         struct bounce_zone *bz;
1249         int pages;
1250
1251         mtx_assert(&bounce_lock, MA_OWNED);
1252         bz = dmat->bounce_zone;
1253         pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1254         if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1255                 return (map->pagesneeded - (map->pagesreserved + pages));
1256         bz->free_bpages -= pages;
1257         bz->reserved_bpages += pages;
1258         map->pagesreserved += pages;
1259         pages = map->pagesneeded - map->pagesreserved;
1260
1261         return (pages);
1262 }
1263
1264 static bus_addr_t
1265 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1266                 bus_addr_t addr, bus_size_t size)
1267 {
1268         struct bounce_zone *bz;
1269         struct bounce_page *bpage;
1270
1271         KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1272
1273         bz = dmat->bounce_zone;
1274         if (map->pagesneeded == 0)
1275                 panic("add_bounce_page: map doesn't need any pages");
1276         map->pagesneeded--;
1277
1278         if (map->pagesreserved == 0)
1279                 panic("add_bounce_page: map doesn't need any pages");
1280         map->pagesreserved--;
1281
1282         mtx_lock(&bounce_lock);
1283         bpage = STAILQ_FIRST(&bz->bounce_page_list);
1284         if (bpage == NULL)
1285                 panic("add_bounce_page: free page list is empty");
1286
1287         STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1288         bz->reserved_bpages--;
1289         bz->active_bpages++;
1290         mtx_unlock(&bounce_lock);
1291
1292         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1293                 /* Page offset needs to be preserved. */
1294                 bpage->vaddr |= addr & PAGE_MASK;
1295                 bpage->busaddr |= addr & PAGE_MASK;
1296         }
1297         bpage->datavaddr = vaddr;
1298         bpage->datapage = PHYS_TO_VM_PAGE(addr);
1299         bpage->dataoffs = addr & PAGE_MASK;
1300         bpage->datacount = size;
1301         STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1302         return (bpage->busaddr);
1303 }
1304
1305 static void
1306 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1307 {
1308         struct bus_dmamap *map;
1309         struct bounce_zone *bz;
1310
1311         bz = dmat->bounce_zone;
1312         bpage->datavaddr = 0;
1313         bpage->datacount = 0;
1314         if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1315                 /*
1316                  * Reset the bounce page to start at offset 0.  Other uses
1317                  * of this bounce page may need to store a full page of
1318                  * data and/or assume it starts on a page boundary.
1319                  */
1320                 bpage->vaddr &= ~PAGE_MASK;
1321                 bpage->busaddr &= ~PAGE_MASK;
1322         }
1323
1324         mtx_lock(&bounce_lock);
1325         STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1326         bz->free_bpages++;
1327         bz->active_bpages--;
1328         if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1329                 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1330                         STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1331                         STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1332                             map, links);
1333                         busdma_swi_pending = 1;
1334                         bz->total_deferred++;
1335                         swi_sched(vm_ih, 0);
1336                 }
1337         }
1338         mtx_unlock(&bounce_lock);
1339 }
1340
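/*
 * Software interrupt handler for deferred bus_dmamap_load() requests.
 * Once free_bounce_page() has completed the reservation for a waiting
 * map, the map moves to the callback list and swi_sched() arranges for
 * the load to be retried here, under the tag's lock, with
 * BUS_DMA_WAITOK.
 */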
1341 void
1342 busdma_swi(void)
1343 {
1344         bus_dma_tag_t dmat;
1345         struct bus_dmamap *map;
1346
1347         mtx_lock(&bounce_lock);
1348         while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1349                 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1350                 mtx_unlock(&bounce_lock);
1351                 dmat = map->dmat;
1352                 (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
1353                 bus_dmamap_load_mem(map->dmat, map, &map->mem,
1354                     map->callback, map->callback_arg, BUS_DMA_WAITOK);
1355                 (dmat->common.lockfunc)(dmat->common.lockfuncarg,
1356                     BUS_DMA_UNLOCK);
1357                 mtx_lock(&bounce_lock);
1358         }
1359         mtx_unlock(&bounce_lock);
1360 }
1361
1362 struct bus_dma_impl bus_dma_bounce_impl = {
1363         .tag_create = bounce_bus_dma_tag_create,
1364         .tag_destroy = bounce_bus_dma_tag_destroy,
1365         .id_mapped = bounce_bus_dma_id_mapped,
1366         .map_create = bounce_bus_dmamap_create,
1367         .map_destroy = bounce_bus_dmamap_destroy,
1368         .mem_alloc = bounce_bus_dmamem_alloc,
1369         .mem_free = bounce_bus_dmamem_free,
1370         .load_phys = bounce_bus_dmamap_load_phys,
1371         .load_buffer = bounce_bus_dmamap_load_buffer,
1372         .load_ma = bus_dmamap_load_ma_triv,
1373         .map_waitok = bounce_bus_dmamap_waitok,
1374         .map_complete = bounce_bus_dmamap_complete,
1375         .map_unload = bounce_bus_dmamap_unload,
1376         .map_sync = bounce_bus_dmamap_sync
1377 };