/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: busdma_machdep.c,v 1.10 1998/10/07 03:38:14 gibbs Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
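
/*
 * Illustrative example of the filter logic above (hypothetical
 * values): a page must be bounced when it falls inside the exclusion
 * window (lowaddr, highaddr] of any tag in the parent chain and no
 * filter function vetoes it.  With lowaddr = BUS_SPACE_MAXADDR_24BIT
 * and highaddr = BUS_SPACE_MAXADDR, a page at physical address
 * 0x2000000 (32MB) lies above lowaddr, so run_filter() returns 1 and
 * the page is bounced; a page at 0x400000 (4MB) is usable directly.
 */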

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
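
/*
 * Usage sketch (hypothetical driver; the values shown are only an
 * example).  A busmaster limited to 24-bit ISA addressing might
 * create its tag as:
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *				   BUS_SPACE_MAXADDR_24BIT,
 *				   BUS_SPACE_MAXADDR,
 *				   NULL, NULL, MAXBSIZE, 1,
 *				   MAXBSIZE, BUS_DMA_ALLOCNOW, &tag);
 *
 * i.e. no parent, byte alignment, no boundary, all memory above 16MB
 * excluded, no filter, at most one MAXBSIZE segment, and the bounce
 * pool populated immediately.  Buffer pages above 16MB will then be
 * bounced through pages residing below BUS_SPACE_MAXADDR_24BIT.
 */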

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			return (ENOMEM);
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
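
/*
 * Illustrative arithmetic for the pool-sizing logic above (assumes a
 * PAGE_SIZE of 4096): a tag with maxsize = 64KB asks for
 * atop(65536) = 16 pages per map, but the pool as a whole is clamped
 * to maxpages = min(MAX_BPAGES, Maxmem - atop(lowaddr)), so at most
 * 128 bounce pages ever exist no matter how many maps are created.
 */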

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT)
				      ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr, 1ul, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
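
/*
 * Usage sketch (hypothetical driver and names).  Allocating a
 * DMA-safe descriptor ring and loading it:
 *
 *	void *ring;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(tag, &ring, BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *	bus_dmamap_load(tag, map, ring, size, callback, sc, 0);
 *
 * Because the memory already satisfies the tag's constraints, map
 * comes back NULL and the subsequent load never bounces.
 */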

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* XXX There is no "contigfree" and "free" doesn't work */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	do {
		bus_size_t	size;
		vm_offset_t	nextpaddr;	/* GCC warning expected */

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0
		 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
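
/*
 * Callback sketch (hypothetical driver).  The segment array lives on
 * bus_dmamap_load()'s stack and is only valid for the duration of
 * the callback, so a driver copies out what it needs:
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *			 int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->sc_ring_busaddr = segs[0].ds_addr;
 *	}
 *
 * A bus_dmamap_load() return of EINPROGRESS means the callback will
 * instead be invoked later, from busdma_swi(), once enough bounce
 * pages have been freed.
 */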

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
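
/*
 * Illustrative call sequence (hypothetical driver).  For a transfer
 * to the device the client data must reach the bounce pages before
 * DMA starts; for a transfer from the device it is copied back after
 * completion:
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	... start DMA to the device ...
 *
 *	... DMA from the device completes ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(tag, map);
 */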

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
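
/*
 * Worked example of the reservation arithmetic above: if a map needs
 * 5 pages, none are yet reserved, and only 3 are free, then
 * pages = min(3, 5 - 0) = 3 move from free to reserved and the
 * function returns 5 - 3 = 2, the deficit that must be satisfied
 * (via free_bounce_page()) before the deferred load can proceed.
 */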

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}

void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}