2 * Copyright (C) 2010 David Xu <davidxu@freebsd.org>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "namespace.h"
33 #include <sys/types.h>
34 #include <sys/queue.h>
38 #include <machine/atomic.h>
47 #include <semaphore.h>
49 #include "un-namespace.h"
50 #include "libc_private.h"
/*
 * Export the libc-internal entry points (_sem_*) under their public
 * POSIX names as weak symbols, so internal callers can use the
 * underscored names without polluting the application namespace.
 */
52 __weak_reference(_sem_close, sem_close);
53 __weak_reference(_sem_destroy, sem_destroy);
54 __weak_reference(_sem_getvalue, sem_getvalue);
55 __weak_reference(_sem_init, sem_init);
56 __weak_reference(_sem_open, sem_open);
57 __weak_reference(_sem_post, sem_post);
58 __weak_reference(_sem_timedwait, sem_timedwait);
59 __weak_reference(_sem_trywait, sem_trywait);
60 __weak_reference(_sem_unlink, sem_unlink);
61 __weak_reference(_sem_wait, sem_wait);
/* Directory prefix under which named-semaphore backing files are created. */
63 #define SEM_PREFIX "/tmp/SEMD"
/* ASCII "sem1": tag stored in sem->_magic to recognize initialized sems. */
64 #define SEM_MAGIC ((u_int32_t)0x73656d31)
/*
 * List linkage for the per-process registry of open named semaphores.
 * NOTE(review): the enclosing struct sem_nameinfo declaration (name,
 * open_count, sem members used below) is elided in this extract.
 */
70 LIST_ENTRY(sem_nameinfo) next;
/* One-time initializer guard for sem_module_init(). */
73 static pthread_once_t once = PTHREAD_ONCE_INIT;
/* Protects sem_list; created recursive in sem_module_init(). */
74 static pthread_mutex_t sem_llock;
/* Registry of named semaphores currently open in this process. */
75 static LIST_HEAD(,sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list);
/*
 * fork(2) handlers registered via _pthread_atfork() in sem_module_init():
 * take sem_llock before fork and release it in both parent and child so
 * the child never inherits a locked sem_llock.
 * NOTE(review): the enclosing function headers (sem_prefork, sem_postfork,
 * sem_child_postfork) are elided in this extract — confirm against the
 * full file.
 */
81 _pthread_mutex_lock(&sem_llock);
87 _pthread_mutex_unlock(&sem_llock);
93 _pthread_mutex_unlock(&sem_llock);
/*
 * One-time module initialization (run under _pthread_once): create
 * sem_llock as a recursive mutex and register the fork handlers above.
 * NOTE(review): the function header is elided in this extract.
 */
99 pthread_mutexattr_t ma;

/* Recursive so code paths holding sem_llock may re-enter sem functions. */
101 _pthread_mutexattr_init(&ma);
102 _pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
103 _pthread_mutex_init(&sem_llock, &ma);
104 _pthread_mutexattr_destroy(&ma);
105 _pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
/*
 * Validate that *sem was initialized by this implementation by checking
 * the magic tag.  Presumably returns 0 on match and -1 with errno set to
 * EINVAL otherwise — the branch bodies are elided in this extract.
 */
109 sem_check_validity(sem_t *sem)
112 if (sem->_magic == SEM_MAGIC)
/*
 * sem_init(3): initialize an unnamed semaphore in place with an initial
 * count of 'value'.  'pshared' non-zero requests cross-process use, which
 * maps to the kernel umtx USYNC_PROCESS_SHARED flag.
 * NOTE(review): the EINVAL error branch and the return statement are
 * elided in this extract.
 */
121 _sem_init(sem_t *sem, int pshared, unsigned int value)
/* Reject counts the implementation cannot represent. */
124 if (value > SEM_VALUE_MAX) {
/* Start from a clean slate, then stamp the magic recognized elsewhere. */
129 bzero(sem, sizeof(sem_t));
130 sem->_magic = SEM_MAGIC;
131 sem->_kern._count = (u_int32_t)value;
132 sem->_kern._has_waiters = 0;
133 sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
/*
 * sem_open(3): open (and optionally create) a named semaphore.
 * The semaphore lives in a file under SEM_PREFIX and is mmap'ed shared;
 * a per-process registry (sem_list) refcounts multiple opens of the same
 * name so they return the same mapping.
 * NOTE(review): many lines (va_start/va_end, refcount bump for an
 * existing entry, goto-style error cleanup, returns) are elided in this
 * extract.
 */
138 _sem_open(const char *name, int flags, ...)
144 struct sem_nameinfo *ni = NULL;
146 int fd = -1, mode, len, errsave;

/* POSIX requires the name to be absolute-style: leading '/'. */
149 if (name[0] != '/') {
/* Only the creation-related open flags are meaningful here. */
155 if (flags & ~(O_CREAT|O_EXCL)) {
160 _pthread_once(&once, sem_module_init);

/* First, see whether this process already has the name open. */
162 _pthread_mutex_lock(&sem_llock);
163 LIST_FOREACH(ni, &sem_list, next) {
164 if (strcmp(name, ni->name) == 0) {
/* O_CREAT|O_EXCL on an existing semaphore must fail (EEXIST). */
165 if ((flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) {
166 _pthread_mutex_unlock(&sem_llock);
/* Presumably the reuse path (bump open_count, return ni->sem) is elided. */
172 _pthread_mutex_unlock(&sem_llock);

/* Creating: fetch mode and initial value from the variadic arguments. */
178 if (flags & O_CREAT) {
180 mode = va_arg(ap, int);
181 value = va_arg(ap, int);

/* Registry entry with the name stored inline after the struct. */
185 len = sizeof(*ni) + strlen(name) + 1;
186 ni = (struct sem_nameinfo *)malloc(len);
192 ni->name = (char *)(ni+1);
193 strcpy(ni->name, name);

/* Build the backing-file path; reject names that would overflow. */
195 strcpy(path, SEM_PREFIX);
196 if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
197 errno = ENAMETOOLONG;

201 fd = _open(path, flags|O_RDWR, mode);
/* flock serializes first-time initialization of the file across processes. */
204 if (flock(fd, LOCK_EX) == -1)
206 if (_fstat(fd, &sb)) {
/* A short file means we created it (or it is stale): write a fresh sem. */
210 if (sb.st_size < sizeof(sem_t)) {
213 tmp._magic = SEM_MAGIC;
214 tmp._kern._has_waiters = 0;
215 tmp._kern._count = value;
/* Named semaphores are always process-shared and tagged SEM_NAMED. */
216 tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
217 if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {

/* Map the semaphore shared so all openers see the same kernel word. */
223 sem = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE,
224     MAP_SHARED|MAP_NOSYNC, fd, 0);
225 if (sem == MAP_FAILED) {
/* Sanity-check what we mapped really is an initialized semaphore. */
231 if (sem->_magic != SEM_MAGIC) {
/* Success: register the mapping for later opens/closes of this name. */
237 LIST_INSERT_HEAD(&sem_list, ni, next);
238 _pthread_mutex_unlock(&sem_llock);
/* Error cleanup path (elided labels): unlock, free, unmap as needed. */
244 _pthread_mutex_unlock(&sem_llock);
248 munmap(sem, sizeof(sem_t));
/*
 * sem_close(3): drop one reference to a named semaphore.  Only valid for
 * SEM_NAMED semaphores; the last close removes the registry entry and
 * unmaps the shared mapping.
 * NOTE(review): the EINVAL return for unnamed semaphores, the free(ni),
 * and the final returns are elided in this extract.
 */
255 _sem_close(sem_t *sem)
257 struct sem_nameinfo *ni;

259 if (sem_check_validity(sem) != 0)

/* sem_close on an unnamed (sem_init'ed) semaphore is an error. */
262 if (!(sem->_kern._flags & SEM_NAMED)) {

267 _pthread_once(&once, sem_module_init);

269 _pthread_mutex_lock(&sem_llock);
270 LIST_FOREACH(ni, &sem_list, next) {
271 if (sem == ni->sem) {
/* Still referenced by other opens in this process: just count down. */
272 if (--ni->open_count > 0) {
273 _pthread_mutex_unlock(&sem_llock);
/* Last reference: unregister and tear down the mapping. */
282 LIST_REMOVE(ni, next);
283 _pthread_mutex_unlock(&sem_llock);
284 munmap(sem, sizeof(*sem));
/* Not found in the registry: fall through to error return (elided). */
288 _pthread_mutex_unlock(&sem_llock);
/*
 * sem_unlink(3): remove a named semaphore's backing file.  Builds the
 * SEM_PREFIX path the same way _sem_open() does; the actual unlink(2)
 * call and returns are elided in this extract.
 */
294 _sem_unlink(const char *name)
/* Same name validation as _sem_open(): must start with '/'. */
298 if (name[0] != '/') {

304 strcpy(path, SEM_PREFIX);
305 if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
306 errno = ENAMETOOLONG;
/*
 * sem_destroy(3): destroy an unnamed semaphore.  Named semaphores must be
 * closed/unlinked instead, so SEM_NAMED is rejected here (error body and
 * the actual destruction are elided in this extract).
 */
313 _sem_destroy(sem_t *sem)
316 if (sem_check_validity(sem) != 0)

319 if (sem->_kern._flags & SEM_NAMED) {
/*
 * sem_getvalue(3): report the current count.  This is an unsynchronized
 * snapshot of the shared counter word; the return statement is elided in
 * this extract.
 */
328 _sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
331 if (sem_check_validity(sem) != 0)

334 *sval = (int)sem->_kern._count;
/*
 * Wake a waiter blocked on the kernel semaphore word.  Skips the syscall
 * entirely when the kernel has flagged no waiters — the common fast path.
 */
339 usem_wake(struct _usem *sem)
341 if (!sem->_has_waiters)
343 return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
/*
 * Block on the kernel semaphore word via _umtx_op(UMTX_OP_SEM_WAIT).
 * 'timeout' is a RELATIVE timespec (computed by the caller from the
 * absolute deadline); a non-positive timeout means the deadline already
 * passed, so fail immediately (error body elided in this extract).
 */
347 usem_wait(struct _usem *sem, const struct timespec *timeout)
349 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
350     timeout->tv_nsec <= 0))) {
/* __DECONST: _umtx_op takes a non-const uaddr2 pointer. */
354 return _umtx_op(sem, UMTX_OP_SEM_WAIT, 0, NULL,
355     __DECONST(void*, timeout));
/*
 * sem_trywait(3): lock-free attempt to decrement the count.  Retries the
 * acquire-CAS while the count is positive; the success return and the
 * EAGAIN failure path are elided in this extract.
 */
359 _sem_trywait(sem_t *sem)
363 if (sem_check_validity(sem) != 0)

366 while ((val = sem->_kern._count) > 0) {
367 if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
/*
 * dst = src - val for struct timespec, normalizing tv_nsec into
 * [0, 1e9).  NOTE(review): the matching "(dst)->tv_sec--;" borrow line
 * and the do/while(0) wrapper appear elided in this extract — confirm
 * against the full file.
 */
374 #define TIMESPEC_SUB(dst, src, val) \
376 (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
377 (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
378 if ((dst)->tv_nsec < 0) { \
380 (dst)->tv_nsec += 1000000000; \
/*
 * sem_timedwait(3): decrement the semaphore, blocking until the absolute
 * CLOCK_REALTIME deadline 'abstime' (NULL means wait forever; _sem_wait
 * delegates here with NULL).  Fast path is a lock-free CAS; the slow path
 * converts the deadline to a relative timeout for usem_wait().
 * NOTE(review): the enclosing retry loop, EINVAL/ETIMEDOUT handling, and
 * returns are elided in this extract.
 */
386 _sem_timedwait(sem_t * __restrict sem,
387     const struct timespec * __restrict abstime)
389 struct timespec ts, ts2;

392 if (sem_check_validity(sem) != 0)

/* Uncontended fast path: acquire-CAS the count down without the kernel. */
397 while ((val = sem->_kern._count) > 0) {
398 if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
/* sem_wait/sem_timedwait are cancellation points even on success. */
403 _pthread_testcancel();

408 * The timeout argument is only supposed to
409 * be checked if the thread would have blocked.
/* Per POSIX, validate abstime only once we are actually going to block. */
411 if (abstime != NULL) {
412 if (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0) {
/* Convert the absolute deadline to the relative form umtx expects. */
416 clock_gettime(CLOCK_REALTIME, &ts);
417 TIMESPEC_SUB(&ts2, abstime, &ts);
/* Block with deferred cancellation enabled around the syscall. */
419 _pthread_cancel_enter(1);
420 retval = usem_wait(&sem->_kern, abstime ? &ts2 : NULL);
421 _pthread_cancel_leave(0);
/* sem_wait(3) is an untimed sem_timedwait: NULL deadline = wait forever. */
427 _sem_wait(sem_t *sem)
429 return _sem_timedwait(sem, NULL);
434 * The sem_post() interface is reentrant with respect to signals and may be
435 * invoked from a signal-catching function.
436 * The implementation does not use lock, so it should be safe.
439 _sem_post(sem_t *sem)
442 if (sem_check_validity(sem) != 0)

/* Release-ordered increment publishes prior writes to the waiter... */
445 atomic_add_rel_int(&sem->_kern._count, 1);
/* ...then wake one blocked thread (no-op if nobody is waiting). */
446 return usem_wake(&sem->_kern);