FreeBSD releng/7.2: lib/libkse/thread/thr_spinlock.c
/*
 * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <machine/atomic.h>

#include <libc_private.h>
#include "spinlock.h"
#include "thr_private.h"

#define MAX_SPINLOCKS   72

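/*
 * Each legacy spinlock is backed by a pthread mutex taken from a small
 * fixed table.  The spinlock's fname field is reused to point at the
 * spinlock_extra slot that holds the mutex.
 */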
struct spinlock_extra {
        spinlock_t      *owner;
        pthread_mutex_t lock;
};

static void     init_spinlock(spinlock_t *lck);

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t      static_mattr = &static_mutex_attr;

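/*
 * Fixed pool of mutexes backing legacy spinlocks.  spinlock_static_lock
 * serializes slot assignment in init_spinlock().
 */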
static pthread_mutex_t          spinlock_static_lock;
static struct spinlock_extra    extra[MAX_SPINLOCKS];
static int                      spinlock_count = 0;
static int                      initialized = 0;

LT10_COMPAT_PRIVATE(_spinlock);
LT10_COMPAT_PRIVATE(_spinlock_debug);
LT10_COMPAT_PRIVATE(_spinunlock);

/*
 * These are for compatibility only.  Spinlocks of this type
 * are deprecated.
 */

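/*
 * Release a legacy spinlock by unlocking the pthread mutex stored
 * behind its fname field.
 */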
void
_spinunlock(spinlock_t *lck)
{
        struct spinlock_extra *extra;

        extra = (struct spinlock_extra *)lck->fname;
        _pthread_mutex_unlock(&extra->lock);
}

/*
 * Lock a location for the running thread.  In this compatibility
 * implementation the lock is backed by a pthread mutex, so the calling
 * thread blocks until the lock becomes available.  The lock is assumed
 * to be held only briefly.
 */
void
_spinlock(spinlock_t *lck)
{
        struct spinlock_extra *extra;

        if (!__isthreaded)
                PANIC("Spinlock called when not threaded.");
        if (!initialized)
                PANIC("Spinlocks not initialized.");
        /*
         * Assign a backing mutex to this spinlock on first use, then
         * acquire it.
         */
        if (lck->fname == NULL)
                init_spinlock(lck);
        extra = (struct spinlock_extra *)lck->fname;
        _pthread_mutex_lock(&extra->lock);
}

/*
 * Debug variant of _spinlock().  In this compatibility implementation
 * the caller's file name and line number are ignored and the call is
 * simply forwarded to _spinlock().
 */
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
        _spinlock(lck);
}

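/*
 * Assign a mutex from the fixed pool to a legacy spinlock.  The
 * caller's check of lck->fname is done without a lock held, so it is
 * repeated here under spinlock_static_lock before a slot is consumed.
 */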
static void
init_spinlock(spinlock_t *lck)
{
        _pthread_mutex_lock(&spinlock_static_lock);
        if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
                lck->fname = (char *)&extra[spinlock_count];
                extra[spinlock_count].owner = lck;
                spinlock_count++;
        }
        _pthread_mutex_unlock(&spinlock_static_lock);
        if (lck->fname == NULL)
                PANIC("Exceeded max spinlocks");
}

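/*
 * Set up the mutexes backing the legacy spinlock pool.  If the pool has
 * already been set up, the mutexes currently in use are re-initialized
 * rather than created again.
 */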
void
_thr_spinlock_init(void)
{
        int i;

        if (initialized != 0) {
                _thr_mutex_reinit(&spinlock_static_lock);
                for (i = 0; i < spinlock_count; i++)
                        _thr_mutex_reinit(&extra[i].lock);
        } else {
                if (_pthread_mutex_init(&spinlock_static_lock, &static_mattr))
                        PANIC("Cannot initialize spinlock_static_lock");
                for (i = 0; i < MAX_SPINLOCKS; i++) {
                        if (_pthread_mutex_init(&extra[i].lock, &static_mattr))
                                PANIC("Cannot initialize spinlock extra");
                }
                initialized = 1;
        }
}
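
/*
 * Usage sketch (editorial note, not part of the original file): legacy
 * libc code reaches these entry points through the _SPINLOCK() and
 * _SPINUNLOCK() macros declared in libc's private spinlock.h.  Assuming
 * those macros and the _SPINLOCK_INITIALIZER definition from that
 * header, a caller looks roughly like this:
 *
 *      static spinlock_t lock = _SPINLOCK_INITIALIZER;
 *
 *      _SPINLOCK(&lock);       // resolves to _spinlock(&lock), or to
 *                              // _spinlock_debug() when _LOCK_DEBUG is set
 *      // ... touch the shared state ...
 *      _SPINUNLOCK(&lock);     // resolves to _spinunlock(&lock)
 *
 * The first _SPINLOCK() on a given lock goes through init_spinlock(),
 * which binds one of the MAX_SPINLOCKS pool entries to it.
 */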