2 * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
30 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
36 #if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
37 #include <dev/sound/pcm/sound.h>
40 #include <dev/sound/clone.h>
43 * So here we go again, another clonedevs manager. Unlike default clonedevs,
44 * this clone manager is designed to withstand various abusive behavior
45 * (such as 'while : ; do ls /dev/whatever ; done', etc.), reusable object
46 * after reaching certain expiration threshold, aggressive garbage collector,
47 * transparent device allocator and concurrency handling across multiple
48 * thread/proc. Due to limited information given by dev_clone EVENTHANDLER,
49 * we don't have many clues whether the caller wants a real open() or simply
50 * making fun of us with things like stat(), mtime() etc. Assuming that:
51 * 1) Time window between dev_clone EH <-> real open() should be small
52 * enough and 2) mtime()/stat() etc. always looks like a half way / stalled
53 * operation, we can decide whether a new cdev must be created, old
54 * (expired) cdev can be reused or an existing cdev can be shared.
56 * Most of the operations and logics are generic enough and can be applied
57 * on other places (such as if_tap, snp, etc). Perhaps this can be
58 * rearranged to complement clone_*(). However, due to this still being
59 * specific to the sound driver (and as a proof of concept on how it can be
60 * done), si_drv2 is used to keep the pointer of the clone list entry to
61 * avoid expensive lookup.
/*
 * Data structures (fragment: the field lists and closing braces were lost
 * in extraction; the leading numbers on each line are line-number residue).
 *
 * struct snd_clone_entry describes a single cloned cdev and is linked on
 * its parent manager's tailq.
 */
65 struct snd_clone_entry {
66 TAILQ_ENTRY(snd_clone_entry) link; /* linkage on the parent's list */
67 struct snd_clone *parent; /* owning clone manager */
/*
 * NOTE(review): the line below belongs to the manager object
 * (struct snd_clone, whose opening line is not visible here); it anchors
 * the list of snd_clone_entry objects walked via TAILQ_FOREACH elsewhere
 * in this file.
 */
77 TAILQ_HEAD(link_head, snd_clone_entry) head;
/*
 * Assertion wrapper.  Two alternative definitions survive here: a
 * do/while variant (body lost -- presumably the SND_DIAGNOSTIC build) and
 * a thin KASSERT(9) pass-through.  The #if/#else/#endif that originally
 * separated them was lost in extraction.
 */
88 #define SND_CLONE_ASSERT(x, y) do { \
93 #define SND_CLONE_ASSERT(x...) KASSERT(x)
/* Timestamp precision selection, patterned after vfs_subr.c. */
97 * Shamelessly ripped off from vfs_subr.c
98 * We need at least 1/HZ precision as default timestamping.
/* Selector values: whole seconds, 1/HZ ticks, microseconds, nanoseconds. */
100 enum { SND_TSP_SEC, SND_TSP_HZ, SND_TSP_USEC, SND_TSP_NSEC };
/* Current precision; 1/HZ by default, overridable via loader tunable. */
102 static int snd_timestamp_precision = SND_TSP_HZ;
103 TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision);
/*
 * snd_timestamp() : Store "now" into *tsp at the resolution selected by
 * snd_timestamp_precision.  (Fragment: the return type, case labels and
 * most branch bodies were lost in extraction.)
 */
106 snd_timestamp(struct timespec *tsp)
110 switch (snd_timestamp_precision) {
/* SND_TSP_SEC path: whole-second resolution taken from time_second. */
112 tsp->tv_sec = time_second;
/* SND_TSP_USEC path: a microsecond timeval converted into *tsp. */
120 TIMEVAL_TO_TIMESPEC(&tv, tsp);
/* Unrecognized setting: force the knob back to the 1/HZ default. */
126 snd_timestamp_precision = SND_TSP_HZ;
132 #if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
/*
 * Sysctl handler for hw.snd.timestamp_precision: export the current value
 * and accept a replacement from userland.  (Fragment: declarations, the
 * presumable range-validation of "val" and the return statements were lost
 * in extraction; the #endif matching the #if above is not visible either.)
 */
134 sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
138 val = snd_timestamp_precision;
139 err = sysctl_handle_int(oidp, &val, 0, req);
/* Commit only on a successful write request (newptr != NULL). */
140 if (err == 0 && req->newptr != NULL) {
146 snd_timestamp_precision = val;
/* Node registration: integer value, read-write, described below. */
155 SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
156 0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
157 "timestamp precision (0=s 1=hz 2=us 3=ns)");
161 * snd_clone_create() : Return opaque allocated clone manager.
/*
 * typemask marks the unit bits reserved for the device type; maxunit caps
 * allocatable units (-1 = widest range typemask allows); deadline is the
 * entry expiration threshold in milliseconds; flags carry SND_CLONE_*
 * options.  (Fragment: the return type, local declaration, a few field
 * initializations and the final return were lost in extraction.)
 */
164 snd_clone_create(int typemask, int maxunit, int deadline, uint32_t flags)
/* Sanity-check all arguments before touching anything. */
168 SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
169 ("invalid typemask: 0x%08x", typemask));
170 SND_CLONE_ASSERT(maxunit == -1 ||
171 !(maxunit & ~(~typemask & SND_CLONE_MAXUNIT)),
172 ("maxunit overflow: typemask=0x%08x maxunit=%d",
174 SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
175 ("invalid clone flags=0x%08x", flags));
/* M_WAITOK: may sleep; M_ZERO so any field not set below starts clean. */
177 c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
180 c->typemask = typemask;
/* -1 selects the widest unit range the typemask leaves available. */
181 c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
183 c->deadline = deadline;
/* Stamp creation time and start with an empty entry list. */
185 snd_timestamp(&c->tsp);
186 TAILQ_INIT(&c->head);
/*
 * snd_clone_busy() : Scan the manager's entry list for anything still in
 * use -- either flagged SND_CLONE_BUSY or whose cdev is still referenced
 * by an open descriptor.  (Fragment: the return paths were lost in
 * extraction; presumably EBUSY on a hit, 0 otherwise -- verify against
 * callers.)
 */
192 snd_clone_busy(struct snd_clone *c)
194 struct snd_clone_entry *ce;
196 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
201 TAILQ_FOREACH(ce, &c->head, link) {
202 if ((ce->flags & SND_CLONE_BUSY) ||
/* si_threadcount != 0 means the cdev is still held by an open(). */
203 (ce->devt != NULL && ce->devt->si_threadcount != 0))
211 * snd_clone_enable()/disable() : Suspend/resume clone allocation through
212 * snd_clone_alloc(). Everything else will not be affected by this.
/*
 * (Fragment: return type and return statements were lost in extraction.)
 */
215 snd_clone_enable(struct snd_clone *c)
217 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/* Already enabled: bail out early without touching anything. */
219 if (c->flags & SND_CLONE_ENABLE)
222 c->flags |= SND_CLONE_ENABLE;
/*
 * snd_clone_disable() : Clear SND_CLONE_ENABLE so snd_clone_alloc() stops
 * handing out entries; no-op if already disabled.  (Fragment: return type
 * and return statements were lost in extraction.)
 */
228 snd_clone_disable(struct snd_clone *c)
230 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/* Already disabled: nothing to do. */
232 if (!(c->flags & SND_CLONE_ENABLE))
235 c->flags &= ~SND_CLONE_ENABLE;
241 * Getters / Setters. Not worth explaining :)
/*
 * snd_clone_getsize() : Accessor for the manager's entry count
 * (presumably returns c->size -- the return line was lost in extraction).
 */
244 snd_clone_getsize(struct snd_clone *c)
246 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/*
 * snd_clone_getmaxunit() : Accessor for the unit cap (presumably returns
 * c->maxunit -- the return line was lost in extraction).
 */
252 snd_clone_getmaxunit(struct snd_clone *c)
254 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/*
 * snd_clone_setmaxunit() : Change the unit cap; -1 selects the widest
 * range the manager's typemask allows.  (Fragment: return type, the
 * second half of the ternary and the return were lost in extraction.)
 */
260 snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
262 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/* Reject any maxunit that collides with the type bits. */
263 SND_CLONE_ASSERT(maxunit == -1 ||
264 !(maxunit & ~(~c->typemask & SND_CLONE_MAXUNIT)),
265 ("maxunit overflow: typemask=0x%08x maxunit=%d",
266 c->typemask, maxunit));
268 c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
275 snd_clone_getdeadline(struct snd_clone *c)
277 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
279 return (c->deadline);
283 snd_clone_setdeadline(struct snd_clone *c, int deadline)
285 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
287 c->deadline = deadline;
289 return (c->deadline);
/*
 * snd_clone_gettime() : Copy the manager's last-activity timestamp out to
 * *tsp (the copy statement itself was lost in extraction -- presumably
 * *tsp = c->tsp).
 */
293 snd_clone_gettime(struct snd_clone *c, struct timespec *tsp)
295 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
296 SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
/*
 * snd_clone_getflags() : Accessor for the manager's SND_CLONE_* flags
 * (presumably returns c->flags -- the return line was lost in extraction).
 */
304 snd_clone_getflags(struct snd_clone *c)
306 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/*
 * snd_clone_setflags() : Replace the manager's flags after validating them
 * against SND_CLONE_MASK.  (Fragment: the assignment and return were lost
 * in extraction.)
 */
312 snd_clone_setflags(struct snd_clone *c, uint32_t flags)
314 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
316 SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
316 ("invalid clone flags=0x%08x", flags));
/*
 * snd_clone_getdevtime() : Per-cdev timestamp getter.  The entry is looked
 * up via dev->si_drv2 per this file's design notes (the lookup lines were
 * lost in extraction); the timestamp is then copied into *tsp.
 */
324 snd_clone_getdevtime(struct cdev *dev, struct timespec *tsp)
326 struct snd_clone_entry *ce;
328 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
329 SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
/* A registered entry always has a parent manager. */
335 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
/*
 * snd_clone_getdevflags() : Per-cdev flags getter; the entry is looked up
 * via dev->si_drv2 per this file's design notes (lookup and return lines
 * were lost in extraction).
 */
343 snd_clone_getdevflags(struct cdev *dev)
345 struct snd_clone_entry *ce;
347 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
/* A registered entry always has a parent manager. */
353 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
/*
 * snd_clone_setdevflags() : Per-cdev flags setter; new flags are validated
 * against SND_CLONE_DEVMASK before being stored (the lookup, assignment
 * and return lines were lost in extraction).
 */
359 snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
361 struct snd_clone_entry *ce;
363 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
364 SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
365 ("invalid clone dev flags=0x%08x", flags));
/* A registered entry always has a parent manager. */
371 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
/*
 * SND_CLONE_ELAPSED(x, y) : Elapsed time between two timespecs,
 * (*x - *y), expressed in milliseconds.  When the nanosecond field of y
 * exceeds that of x, a borrow of one second (1000000000 ns) is taken so
 * the division always truncates a non-negative quantity, and the 1000 ms
 * borrowed is subtracted back out.  (Restored to valid C: the extraction
 * had fused line numbers into the macro text.)
 */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))
/*
 * SND_CLONE_EXPIRED(x, y, z) : True when timestamp *z is older than
 * (x)->deadline milliseconds relative to current time *y.  A deadline
 * below 1 makes everything count as expired; the coarse tv_sec comparison
 * short-circuits the millisecond math for gaps much larger than the
 * deadline.  (Restored to valid C: the extraction had fused line numbers
 * into the macro text.)
 */
#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||		\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)
392 * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
393 * clone.h for explanations on GC settings.
/*
 * (Fragment: return type, the "now" declaration/timestamping, a prune
 * counter and the early returns were lost in extraction.)
 */
396 snd_clone_gc(struct snd_clone *c)
398 struct snd_clone_entry *ce, *tce;
402 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
/* GC disabled or nothing to collect: bail out. */
404 if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
410 * Bail out if the last clone handler was invoked below the deadline
/* GC_EXPIRED policy: only run when the manager itself has gone stale. */
413 if ((c->flags & SND_CLONE_GC_EXPIRED) &&
414 !SND_CLONE_EXPIRED(c, &now, &c->tsp))
420 * Visit each object in reverse order. If the object is still being
421 * referenced by a valid open(), skip it. Look for expired objects
422 * and either revoke its clone invocation status or mercilessly
/* _SAFE variant: tce caches the predecessor so removal mid-loop is ok. */
425 TAILQ_FOREACH_REVERSE_SAFE(ce, &c->head, link_head, link, tce) {
/* Candidate: not busy, and either never invoked or past its deadline. */
426 if (!(ce->flags & SND_CLONE_BUSY) &&
427 (!(ce->flags & SND_CLONE_INVOKE) ||
428 SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
/* Revoke-only path: keep the entry, just clear its invoke status. */
429 if ((c->flags & SND_CLONE_GC_REVOKE) ||
430 ce->devt->si_threadcount != 0) {
431 ce->flags &= ~SND_CLONE_INVOKE;
/* Destroy path: unlink the entry and tear down its cdev. */
434 TAILQ_REMOVE(&c->head, ce, link);
435 destroy_dev(ce->devt);
443 /* return total pruned objects */
/*
 * snd_clone_destroy() : Tear down the whole manager -- walk the entry list
 * (hand-rolled safe iteration via TAILQ_FIRST/TAILQ_NEXT), destroy each
 * entry's cdev, and presumably free the entries and the manager itself
 * (the loop header, free() calls and closing lines were lost in
 * extraction).
 */
448 snd_clone_destroy(struct snd_clone *c)
450 struct snd_clone_entry *ce, *tmp;
452 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
454 ce = TAILQ_FIRST(&c->head);
/* Grab the successor before ce is destroyed/freed. */
456 tmp = TAILQ_NEXT(ce, link);
457 if (ce->devt != NULL)
458 destroy_dev(ce->devt);
467 * snd_clone_acquire() : The vital part of concurrency management. Must be
468 * called somewhere at the beginning of open() handler. ENODEV is not really
469 * fatal since it just tell the caller that this is not cloned stuff.
470 * EBUSY is *real*, don't forget that!
/*
 * (Fragment: the si_drv2 lookup, the ENODEV path and the return
 * statements were lost in extraction.)
 */
473 snd_clone_acquire(struct cdev *dev)
475 struct snd_clone_entry *ce;
477 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
/* A registered entry always has a parent manager. */
483 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
/* A real open() arrived: the pending clone invocation is consumed. */
485 ce->flags &= ~SND_CLONE_INVOKE;
/* Somebody else owns this entry -- per the header, that's EBUSY. */
487 if (ce->flags & SND_CLONE_BUSY)
490 ce->flags |= SND_CLONE_BUSY;
496 * snd_clone_release() : Release busy status. Must be called somewhere at
497 * the end of close() handler, or somewhere after fail open().
/*
 * (Fragment: the si_drv2 lookup, timestamp refresh and return statements
 * were lost in extraction.)
 */
500 snd_clone_release(struct cdev *dev)
502 struct snd_clone_entry *ce;
504 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
/* A registered entry always has a parent manager. */
510 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
/* Any pending clone invocation is consumed on release as well. */
512 ce->flags &= ~SND_CLONE_INVOKE;
/* Releasing a non-busy entry is an error path (return line lost). */
514 if (!(ce->flags & SND_CLONE_BUSY))
517 ce->flags &= ~SND_CLONE_BUSY;
524 * snd_clone_ref/unref() : Garbage collector reference counter. To make
525 * garbage collector run automatically, the sequence must be something like
526 * this (both in open() and close() handlers):
528 * open() - 1) snd_clone_acquire()
529 * 2) .... check check ... if failed, snd_clone_release()
530 * 3) Success. Call snd_clone_ref()
532 * close() - 1) .... check check check ....
533 * 2) Success. snd_clone_release()
534 * 3) snd_clone_unref() . Garbage collector will run at this point
535 * if this is the last referenced object.
/*
 * snd_clone_ref() : Bump the parent manager's refcount and return the new
 * value.  (Fragment: the si_drv2 lookup assigning "c" was lost in
 * extraction.)
 */
538 snd_clone_ref(struct cdev *dev)
540 struct snd_clone_entry *ce;
543 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
550 SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
/* Refcount must never have gone negative before an increment. */
551 SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));
553 return (++c->refcount);
/*
 * snd_clone_unref() : Drop one reference on the parent manager, possibly
 * trigger the garbage collector, and return the remaining count.
 * (Fragment: the si_drv2 lookup and the decrement itself were lost in
 * extraction.)
 */
557 snd_clone_unref(struct cdev *dev)
559 struct snd_clone_entry *ce;
562 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
569 SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
/* Unref without a matching ref would underflow. */
570 SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));
575 * Run automatic garbage collector, if needed.
/*
 * GC_UNREF: run on every unref -- unless GC_LASTREF is also set, in which
 * case only run when the final reference has just been dropped.
 */
577 if ((c->flags & SND_CLONE_GC_UNREF) &&
578 (!(c->flags & SND_CLONE_GC_LASTREF) ||
579 (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
580 (void)snd_clone_gc(c);
582 return (c->refcount);
/*
 * snd_clone_register() : Bind a freshly allocated entry (still flagged
 * SND_CLONE_ALLOC) to its newly created cdev.  The assignments storing
 * the cdev in ce->devt and the entry pointer in dev->si_drv2 (per this
 * file's design notes) were lost in extraction; the invariant checks and
 * the ALLOC -> INVOKE flag transition remain.
 */
586 snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
588 SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
589 SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
/* The cdev must not already be bound to some other entry. */
590 SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
/* Only entries fresh out of snd_clone_alloc() may be registered. */
591 SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
592 ("invalid clone alloc flags=0x%08x", ce->flags));
593 SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
/* The unit chosen at allocation time must match the created cdev. */
594 SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
595 ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
596 ce->unit, dev2unit(dev)));
598 SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
/* Allocation complete; mark a clone invocation as pending instead. */
602 ce->flags &= ~SND_CLONE_ALLOC;
603 ce->flags |= SND_CLONE_INVOKE;
606 struct snd_clone_entry *
607 snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
609 struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
611 int cunit, allocunit;
614 SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
615 SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
616 SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
617 ("invalid tmask: typemask=0x%08x tmask=0x%08x",
618 c->typemask, tmask));
619 SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
620 SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
621 ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
622 c->typemask, tmask, *unit));
624 if (!(c->flags & SND_CLONE_ENABLE) ||
625 (*unit != -1 && *unit > c->maxunit))
630 bce = NULL; /* "b"usy candidate */
631 cce = NULL; /* "c"urthread/proc candidate */
632 nce = NULL; /* "n"ull, totally unbusy candidate */
633 tce = NULL; /* Last "t"ry candidate */
635 allocunit = (*unit == -1) ? 0 : *unit;
636 curpid = curthread->td_proc->p_pid;
640 TAILQ_FOREACH(ce, &c->head, link) {
642 * Sort incrementally according to device type.
644 if (tmask > (ce->unit & c->typemask)) {
648 } else if (tmask < (ce->unit & c->typemask))
652 * Shoot.. this is where the grumpiness begin. Just
653 * return immediately.
655 if (*unit != -1 && *unit == (ce->unit & ~tmask))
656 goto snd_clone_alloc_out;
660 * Similar device type. Sort incrementally according
661 * to allocation unit. While here, look for free slot
662 * and possible collision for new / future allocation.
664 if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
666 if ((ce->unit & ~tmask) < allocunit)
670 * 1. Look for non busy, but keep track of the best
671 * possible busy cdev.
672 * 2. Look for the best (oldest referenced) entry that is
673 * in a same process / thread.
674 * 3. Look for the best (oldest referenced), absolute free
676 * 4. Lastly, look for the best (oldest referenced)
677 * any entries that doesn't fit with anything above.
679 if (ce->flags & SND_CLONE_BUSY) {
680 if (ce->devt != NULL && (bce == NULL ||
681 timespeccmp(&ce->tsp, &bce->tsp, <)))
685 if (ce->pid == curpid &&
686 (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
688 else if (!(ce->flags & SND_CLONE_INVOKE) &&
689 (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
691 else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
695 goto snd_clone_alloc_new;
696 else if (cce != NULL) {
697 /* Same proc entry found, go for it */
699 goto snd_clone_alloc_out;
700 } else if (nce != NULL) {
702 * Next, try absolute free entry. If the calculated
703 * allocunit is smaller, create new entry instead.
705 if (allocunit < (nce->unit & ~tmask))
706 goto snd_clone_alloc_new;
708 goto snd_clone_alloc_out;
709 } else if (allocunit > c->maxunit) {
711 * Maximum allowable unit reached. Try returning any
712 * available cdev and hope for the best. If the lookup is
713 * done for things like stat(), mtime() etc. , things should
714 * be ok. Otherwise, open() handler should do further checks
715 * and decide whether to return correct error code or not.
719 goto snd_clone_alloc_out;
720 } else if (bce != NULL) {
722 goto snd_clone_alloc_out;
729 * No free entries found, and we still haven't reached maximum
730 * allowable units. Allocate, setup a minimal unique entry with busy
731 * status so nobody will monkey on this new entry. Unit magic is set
732 * right here to avoid collision with other contesting handler.
733 * The caller must be careful here to maintain its own
734 * synchronization, as long as it will not conflict with malloc(9)
737 * That said, go figure.
739 ce = malloc(sizeof(*ce), M_DEVBUF,
740 ((c->flags & SND_CLONE_WAITOK) ? M_WAITOK : M_NOWAIT) | M_ZERO);
745 * We're being dense, ignorance is bliss,
746 * Super Regulatory Measure (TM).. TRY AGAIN!
750 goto snd_clone_alloc_out;
751 } else if (tce != NULL) {
753 goto snd_clone_alloc_out;
754 } else if (bce != NULL) {
756 goto snd_clone_alloc_out;
760 /* Setup new entry */
762 ce->unit = tmask | allocunit;
765 ce->flags |= SND_CLONE_ALLOC;
767 TAILQ_INSERT_AFTER(&c->head, after, ce, link);
769 TAILQ_INSERT_HEAD(&c->head, ce, link);
774 * Save new allocation unit for caller which will be used
783 * Set, mark, timestamp the entry if this is a truly free entry.
784 * Leave busy entry alone.
786 if (!(ce->flags & SND_CLONE_BUSY)) {
789 ce->flags |= SND_CLONE_INVOKE;