/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return non-zero if the given physical address must be bounced: i.e. it
 * falls inside the exclusion window (lowaddr, highaddr] of this tag or
 * any of its ancestors and no filter function exempts it.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
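
/*
 * Illustrative sketch (not part of this file's API): the shape of a
 * driver-supplied bus_dma_filter_t as consumed by run_filter() above.
 * Returning non-zero tells run_filter() that the page at "paddr" must be
 * bounced; returning zero exempts a page even though it lies inside the
 * tag's exclusion window.  The device constraint below is hypothetical.
 */
#ifdef BUSDMA_USAGE_EXAMPLES
static int
example_filter(void *arg, bus_addr_t paddr)
{
        /*
         * Hypothetical device that can only reach even-numbered
         * megabytes within its exclusion window.
         */
        return ((paddr & 0x100000) != 0);
}
#endif /* BUSDMA_USAGE_EXAMPLES */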

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device-specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        /* Round the address limits up to the last byte of their page. */
        newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL) {
                        parent->ref_count++;
                }
        }

        if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
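
/*
 * Illustrative sketch (not compiled into this file): how a driver for a
 * hypothetical device limited to the low 16MB of memory might create its
 * tag with the twelve-argument signature used above.  All names here are
 * made up for the example.
 */
#ifdef BUSDMA_USAGE_EXAMPLES
static int
example_create_tag(bus_dma_tag_t *tagp)
{
        /*
         * No parent, byte alignment, no boundary constraint, bounce
         * anything above 16MB, no filter, up to 64KB transfers split
         * across at most 16 segments.
         */
        return (bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
                                   /*boundary*/0,
                                   /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
                                   /*highaddr*/BUS_SPACE_MAXADDR,
                                   /*filter*/NULL, /*filterarg*/NULL,
                                   /*maxsize*/65536, /*nsegments*/16,
                                   /*maxsegsz*/BUS_SPACE_MAXSIZE_24BIT,
                                   /*flags*/0, tagp));
}
#endif /* BUSDMA_USAGE_EXAMPLES */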

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa(Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT);
                if (*mapp == NULL) {
                        return (ENOMEM);
                } else {
                        /* Initialize the new map */
                        bzero(*mapp, sizeof(**mapp));
                        STAILQ_INIT(&((*mapp)->bpages));
                }
                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = atop(dmat->maxsize);
                        pages = MIN(maxpages - total_bpages, pages);
                        /*
                         * alloc_bounce_pages() returns the number of pages
                         * actually allocated, not an errno value.
                         */
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}
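
/*
 * Illustrative sketch (not compiled into this file): a typical map life
 * cycle.  The tag and the callback are hypothetical; the callback body
 * is sketched after bus_dmamap_load() below, as noted there.
 */
#ifdef BUSDMA_USAGE_EXAMPLES
static void example_callback(void *, bus_dma_segment_t *, int, int);

static int
example_map_buffer(bus_dma_tag_t example_tag, void *buf, bus_size_t len,
                   void *callback_arg)
{
        bus_dmamap_t map;
        int error;

        error = bus_dmamap_create(example_tag, /*flags*/0, &map);
        if (error != 0)
                return (error);
        /*
         * EINPROGRESS is not a failure: the load was queued and the
         * callback will run from busdma_swi() once bounce pages free up.
         */
        error = bus_dmamap_load(example_tag, map, buf, len,
                                example_callback, callback_arg, /*flags*/0);
        if (error == EINPROGRESS)
                error = 0;
        return (error);
}
#endif /* BUSDMA_USAGE_EXAMPLES */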

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
                    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        /* XXX There is no "contigfree" and "free" doesn't work */
        if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
                free(vaddr, M_DEVBUF);
}
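
/*
 * Illustrative sketch (not compiled into this file): pairing
 * bus_dmamem_alloc() with bus_dmamem_free().  The tag and the use of the
 * memory are hypothetical.
 */
#ifdef BUSDMA_USAGE_EXAMPLES
static int
example_ring_lifetime(bus_dma_tag_t example_tag)
{
        bus_dmamap_t map;
        void *ring;
        int error;

        error = bus_dmamem_alloc(example_tag, &ring, BUS_DMA_NOWAIT, &map);
        if (error != 0)
                return (error);
        /* ... load the ring and program the device with its address ... */
        bus_dmamem_free(example_tag, ring, map);  /* map is NULL here */
        return (0);
}
#endif /* BUSDMA_USAGE_EXAMPLES */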

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t             vaddr;
        vm_offset_t             paddr;
#ifdef __GNUC__
        bus_dma_segment_t       dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t       dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_dma_segment_t      *sg;
        int                     seg;
        int                     error;
        vm_offset_t             nextpaddr;

        if (map == NULL)
                map = &nobounce_dmamap;

        error = 0;
        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
                vm_offset_t     vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0)
                                map->pagesneeded++;
                        vaddr += PAGE_SIZE;
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                int s;

                s = splhigh();
                if (reserve_bounce_pages(dmat, map) != 0) {
                        /* Queue us for resources */
                        map->dmat = dmat;
                        map->buf = buf;
                        map->buflen = buflen;
                        map->callback = callback;
                        map->callback_arg = callback_arg;

                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        splx(s);

                        return (EINPROGRESS);
                }
                splx(s);
        }

        vaddr = (vm_offset_t)buf;
        sg = &dm_segments[0];
        seg = 1;
        sg->ds_len = 0;

        nextpaddr = 0;
        do {
                bus_size_t      size;

                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;

                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                if (sg->ds_len == 0) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        /* Go to the next segment */
                        sg++;
                        seg++;
                        if (seg > dmat->nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                vaddr += size;
                nextpaddr = paddr + size;
                buflen -= size;
        } while (buflen > 0);

        if (buflen != 0) {
                printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
                       (u_long)buflen);
                error = EFBIG;
        }

        (*callback)(callback_arg, dm_segments, seg, error);

        return (0);
}
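
/*
 * Illustrative sketch (not compiled into this file): a minimal
 * bus_dmamap_callback_t matching the invocation above.  Drivers
 * typically stash the segment addresses in their softc; the struct and
 * names here are hypothetical.
 */
#ifdef BUSDMA_USAGE_EXAMPLES
struct example_softc {
        bus_addr_t      dma_addr;
        int             dma_error;
};

static void
example_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct example_softc *sc = arg;

        sc->dma_error = error;
        if (error != 0)
                return;
        /* This hypothetical device only handles a single segment. */
        sc->dma_addr = segs[0].ds_addr;
}
#endif /* BUSDMA_USAGE_EXAMPLES */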

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        /* Copy the client data into the bounce pages. */
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;

                case BUS_DMASYNC_POSTREAD:
                        /* Copy the bounce pages back to the client data. */
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;
                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}
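
/*
 * Illustrative sketch (not compiled into this file): ordering of sync
 * operations around a device write, via the public bus_dmamap_sync() and
 * bus_dmamap_unload() wrappers.  The device start/wait steps are
 * hypothetical.
 */
#ifdef BUSDMA_USAGE_EXAMPLES
static void
example_do_write(bus_dma_tag_t tag, bus_dmamap_t map)
{
        /* Flush client data into the bounce pages before the device reads. */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
        /* ... start the device and wait for the transfer to complete ... */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(tag, map);
}
#endif /* BUSDMA_USAGE_EXAMPLES */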

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        if (total_bpages == 0) {
                STAILQ_INIT(&bounce_page_list);
                STAILQ_INIT(&bounce_map_waitinglist);
                STAILQ_INIT(&bounce_map_callbacklist);
        }

        while (numpages > 0) {
                struct bounce_page *bpage;
                int s;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT);

                if (bpage == NULL)
                        break;
                bzero(bpage, sizeof(*bpage));
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                s = splhigh();
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                splx(s);
                count++;
                numpages--;
        }
        return (count);
}

/*
 * Reserve as many free bounce pages as possible for the given map.
 * Returns the number of pages still needed; zero means the map is
 * fully reserved.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        int pages;

        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        int s;
        struct bounce_page *bpage;

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        s = splhigh();
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        splx(s);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        int s;
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        s = splhigh();
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        setsoftvm();
                }
        }
        splx(s);
}

void
busdma_swi(void)
{
        int s;
        struct bus_dmamap *map;

        s = splhigh();
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                splx(s);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                s = splhigh();
        }
        splx(s);
}