/*-
 * Copyright (c) 2003 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include <atomic_ops.h>
#include "thr_private.h"

#define SPIN_COUNT 10000

__weak_reference(_pthread_spin_init, pthread_spin_init);
__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
__weak_reference(_pthread_spin_lock, pthread_spin_lock);
__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);

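/*
 * pthread_spin_init(): allocate and initialize a process-private
 * spinlock.  Process-shared locks are not supported, so any pshared
 * value other than PTHREAD_PROCESS_PRIVATE yields EINVAL, and a failed
 * allocation yields ENOMEM.  The lock starts out unlocked (s_lock == 0)
 * with no owning thread.
 */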
int
_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
	struct pthread_spinlock *lck;
	int ret;

	if (lock == NULL || pshared != PTHREAD_PROCESS_PRIVATE)
		ret = EINVAL;
	else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL)
		ret = ENOMEM;
	else {
		lck->s_lock = 0;
		lck->s_owner = NULL;
		*lock = lck;
		ret = 0;
	}

	return (ret);
}

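/*
 * pthread_spin_destroy(): release the storage backing a spinlock.
 * Fails with EINVAL on a NULL or uninitialized lock and with EBUSY
 * if the lock is still owned by some thread.
 */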
int
_pthread_spin_destroy(pthread_spinlock_t *lock)
{
	int ret;

	if (lock == NULL || *lock == NULL)
		ret = EINVAL;
	else if ((*lock)->s_owner != NULL)
		ret = EBUSY;
	else {
		free(*lock);
		*lock = NULL;
		ret = 0;
	}

	return (ret);
}

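/*
 * pthread_spin_trylock(): make a single attempt to acquire the lock.
 * A lock the caller already owns reports EDEADLK, and a lock that is
 * (or becomes) held by another thread reports EBUSY.  The plain
 * s_lock != 0 test is only a fast-path check; the acquisition itself
 * is the atomic swap of 1 into s_lock.
 */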
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock *lck;
	struct pthread *self = _pthread_self();
	int oldval, ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else if (lck->s_owner == self)
		ret = EDEADLK;
	else if (lck->s_lock != 0)
		ret = EBUSY;
	else {
		atomic_swap_int((int *)&lck->s_lock, 1, &oldval);
		if (oldval)
			ret = EBUSY;
		else {
			lck->s_owner = self;
			ret = 0;
		}
	}
	return (ret);
}

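/*
 * pthread_spin_lock(): acquire the lock, spinning until it is free.
 * The inner loop busy-waits on s_lock, issuing the i386 "pause" hint
 * to mark the spin-wait, and calls _pthread_yield() every SPIN_COUNT
 * iterations so other threads can make progress; the outer loop
 * retries the atomic swap until it actually wins the lock.
 */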
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock *lck;
	struct pthread *self = _pthread_self();
	int count, oldval, ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else if (lck->s_owner == self)
		ret = EDEADLK;
	else {
		do {
			count = SPIN_COUNT;
			while (lck->s_lock) {
#ifdef __i386__
				/* Tell the CPU we are spinning. */
				__asm __volatile("pause");
#endif
				if (--count <= 0) {
					count = SPIN_COUNT;
					_pthread_yield();
				}
			}
			atomic_swap_int((int *)&lck->s_lock, 1, &oldval);
		} while (oldval);

		lck->s_owner = self;
		ret = 0;
	}

	return (ret);
}

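/*
 * pthread_spin_unlock(): release a lock held by the calling thread.
 * A caller that does not own the lock gets EPERM.  The owner field is
 * cleared before the atomic swap that drops s_lock back to zero.
 */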
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock *lck;
	int ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else {
		if (lck->s_owner != _pthread_self())
			ret = EPERM;
		else {
			lck->s_owner = NULL;
			/*
			 * Release the lock; the old value swapped into
			 * ret is not needed, so report success.
			 */
			atomic_swap_int((int *)&lck->s_lock, 0, &ret);
			ret = 0;
		}
	}

	return (ret);
}

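/*
 * Illustrative usage sketch (not part of libkse): a minimal,
 * hypothetical program exercising the pthread_spin_* interfaces
 * exported above.  Error handling is abbreviated.
 *
 *	#include <pthread.h>
 *
 *	static pthread_spinlock_t lock;
 *	static int counter;
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 1000; i++) {
 *			pthread_spin_lock(&lock);
 *			counter++;		** protected by the spinlock **
 *			pthread_spin_unlock(&lock);
 *		}
 *		return (NULL);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		pthread_t td;
 *
 *		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
 *		pthread_create(&td, NULL, worker, NULL);
 *		worker(NULL);
 *		pthread_join(td, NULL);
 *		pthread_spin_destroy(&lock);
 *		return (counter == 2000 ? 0 : 1);
 *	}
 */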