/*-
 * Copyright (c) 2003 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <sys/types.h>
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include "un-namespace.h"

#include "atomic_ops.h"
#include "thr_private.h"

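/*
 * Number of times to spin on a busy lock before yielding the processor
 * to give the lock holder a chance to run.
 */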
#define SPIN_COUNT 10000

__weak_reference(_pthread_spin_init, pthread_spin_init);
__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
__weak_reference(_pthread_spin_lock, pthread_spin_lock);
__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);

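/*
 * Allocate and initialize a spinlock.  Only process-private locks are
 * supported; any other pshared value fails with EINVAL.
 */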
int
_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
        struct pthread_spinlock *lck;
        int ret;

        if (lock == NULL || pshared != PTHREAD_PROCESS_PRIVATE)
                ret = EINVAL;
        else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL)
                ret = ENOMEM;
        else {
                lck->s_lock = 0;
                lck->s_owner = NULL;
                *lock = lck;
                ret = 0;
        }

        return (ret);
}

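/*
 * Destroy a spinlock and free its storage.  Fails with EBUSY if a
 * thread currently owns the lock.
 */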
int
_pthread_spin_destroy(pthread_spinlock_t *lock)
{
        int ret;

        if (lock == NULL || *lock == NULL)
                ret = EINVAL;
        else if ((*lock)->s_owner != NULL)
                ret = EBUSY;
        else {
                free(*lock);
                *lock = NULL;
                ret = 0;
        }

        return (ret);
}

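/*
 * Try to acquire the lock without blocking.  Returns EBUSY if the lock
 * is held by another thread and EDEADLK if the caller already owns it.
 */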
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
        struct pthread_spinlock *lck;
        struct pthread *self = _pthread_self();
        int oldval, ret;

        if (lock == NULL || (lck = *lock) == NULL)
                ret = EINVAL;
        else if (lck->s_owner == self)
                ret = EDEADLK;
        else if (lck->s_lock != 0)
                ret = EBUSY;
        else {
                atomic_swap_int(&lck->s_lock, 1, &oldval);
                if (oldval)
                        ret = EBUSY;
                else {
                        lck->s_owner = self;
                        ret = 0;
                }
        }
        return (ret);
}

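/*
 * Acquire the lock, spinning in user space.  After SPIN_COUNT
 * unsuccessful spins the thread yields the processor so that the lock
 * holder can make progress.
 */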
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
        struct pthread_spinlock *lck;
        struct pthread *self = _pthread_self();
        int count, oldval, ret;

        if (lock == NULL || (lck = *lock) == NULL)
                ret = EINVAL;
        else if (lck->s_owner == self)
                ret = EDEADLK;
        else {
                do {
                        count = SPIN_COUNT;
                        while (lck->s_lock) {
#ifdef __i386__
                                /* Tell the CPU we are spinning. */
                                __asm __volatile("pause");
#endif
                                if (--count <= 0) {
                                        count = SPIN_COUNT;
                                        _pthread_yield();
                                }
                        }
                        atomic_swap_int(&lck->s_lock, 1, &oldval);
                } while (oldval);

                lck->s_owner = self;
                ret = 0;
        }

        return (ret);
}

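/*
 * Release the lock.  Only the owning thread may unlock; other callers
 * get EPERM.
 */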
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
        struct pthread_spinlock *lck;
        int ret;

        if (lock == NULL || (lck = *lock) == NULL)
                ret = EINVAL;
        else {
                if (lck->s_owner != _pthread_self())
                        ret = EPERM;
                else {
                        lck->s_owner = NULL;
                        /* Clear the lock word; the swapped-out old value is not needed. */
                        atomic_swap_int(&lck->s_lock, 0, &ret);
                        ret = 0;
                }
        }

        return (ret);
}

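/*
 * Illustrative usage sketch (not part of the library): a minimal example,
 * assuming a hypothetical shared counter, of how an application calls the
 * standard pthread_spin_* names that the weak references above expose.
 *
 *	static pthread_spinlock_t counter_lock;
 *	static int counter;
 *
 *	// Once, before any thread uses the lock; PTHREAD_PROCESS_SHARED
 *	// is rejected with EINVAL by this implementation.
 *	pthread_spin_init(&counter_lock, PTHREAD_PROCESS_PRIVATE);
 *
 *	// In each worker thread.
 *	pthread_spin_lock(&counter_lock);
 *	counter++;
 *	pthread_spin_unlock(&counter_lock);
 *
 *	// When the lock is no longer needed and not held.
 *	pthread_spin_destroy(&counter_lock);
 */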