2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2010 David Xu <davidxu@freebsd.org>.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice(s), this list of conditions and the following disclaimer as
12 * the first lines of this file unmodified other than the possible
13 * addition of one or more copyright notices.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice(s), this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
26 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
28 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "namespace.h"
35 #include <sys/types.h>
36 #include <sys/queue.h>
40 #include <machine/atomic.h>
50 #include <semaphore.h>
52 #include "un-namespace.h"
53 #include "libc_private.h"
/*
 * Export the private _sem_* entry points under the public sem_* names
 * as weak symbols, so other libraries (e.g. a threading library) or the
 * application may interpose their own strong definitions.
 */
55 __weak_reference(_sem_close, sem_close);
56 __weak_reference(_sem_destroy, sem_destroy);
57 __weak_reference(_sem_getvalue, sem_getvalue);
58 __weak_reference(_sem_init, sem_init);
59 __weak_reference(_sem_open, sem_open);
60 __weak_reference(_sem_post, sem_post);
61 __weak_reference(_sem_timedwait, sem_timedwait);
62 __weak_reference(_sem_clockwait_np, sem_clockwait_np);
63 __weak_reference(_sem_trywait, sem_trywait);
64 __weak_reference(_sem_unlink, sem_unlink);
65 __weak_reference(_sem_wait, sem_wait);
/* Directory prefix under which named-semaphore backing files are created. */
67 #define SEM_PREFIX "/tmp/SEMD"
/* Tag stored in sem->_magic to mark an initialized semaphore ("sem2" in ASCII). */
68 #define SEM_MAGIC ((u_int32_t)0x73656d32)
/* The umtx kernel primitive must be able to represent every legal count. */
70 _Static_assert(SEM_VALUE_MAX <= USEM_MAX_COUNT, "SEM_VALUE_MAX too large");
/* Linkage in the global open-named-semaphore list (the enclosing
 * struct sem_nameinfo definition is only partially visible here). */
78 LIST_ENTRY(sem_nameinfo) next;
/* Guard ensuring sem_module_init() runs exactly once per process. */
81 static pthread_once_t once = PTHREAD_ONCE_INIT;
/* Protects sem_list and, presumably, per-entry open counts — TODO confirm. */
82 static pthread_mutex_t sem_llock;
/* All named semaphores currently open in this process. */
83 static LIST_HEAD(, sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list);
/*
 * atfork handlers (function headers elided in this view): sem_llock is
 * taken before fork() and released in both parent and child afterwards,
 * so the child never inherits the list lock in a locked state.
 */
89 _pthread_mutex_lock(&sem_llock);
96 _pthread_mutex_unlock(&sem_llock);
/* Child-side atfork handler: drop the lock held across fork(). */
100 sem_child_postfork(void)
103 _pthread_mutex_unlock(&sem_llock);
/* One-time module setup: create the list lock and register the handlers. */
107 sem_module_init(void)
110 _pthread_mutex_init(&sem_llock, NULL);
111 _pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
/*
 * Validate a user-supplied semaphore pointer by checking its magic tag.
 * (Return statements elided in this view; presumably returns 0 on a
 * valid semaphore and sets errno = EINVAL otherwise — TODO confirm.)
 */
115 sem_check_validity(sem_t *sem)
118 if (sem->_magic == SEM_MAGIC)
/*
 * sem_init(3): initialize an unnamed semaphore in place.
 * Rejects initial values above SEM_VALUE_MAX; zeroes the object, stamps
 * the magic tag, and seeds the kernel-visible count and sharing flags.
 */
125 _sem_init(sem_t *sem, int pshared, unsigned int value)
128 if (value > SEM_VALUE_MAX) {
133 bzero(sem, sizeof(sem_t));
134 sem->_magic = SEM_MAGIC;
135 sem->_kern._count = (u_int32_t)value;
/* pshared selects process-shared umtx semantics for the kernel word. */
136 sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
/*
 * sem_open(3): open or create a named semaphore backed by a file under
 * SEM_PREFIX that is mmap()ed shared into the process.  An in-process
 * sem_list entry deduplicates repeated opens of the same name.
 * NOTE(review): several lines of this function (error paths, unlock/
 * cleanup labels, closing braces) are elided in this view; comments
 * below describe only the visible statements.
 */
141 _sem_open(const char *name, int flags, ...)
146 struct sem_nameinfo *ni;
148 int errsave, fd, len, mode, value;
/* POSIX requires the name to begin with a slash. */
155 if (name[0] != '/') {
/* Build the backing-file path; reject names that would overflow it. */
160 strcpy(path, SEM_PREFIX);
161 if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
162 errno = ENAMETOOLONG;
/* Only O_CREAT and O_EXCL are meaningful flags for sem_open(). */
165 if (flags & ~(O_CREAT|O_EXCL)) {
/* With O_CREAT, mode and initial value arrive as variadic arguments. */
169 if ((flags & O_CREAT) != 0) {
171 mode = va_arg(ap, int);
172 value = va_arg(ap, int);
176 _pthread_once(&once, sem_module_init);
/* Fast path: the name may already be open in this process. */
178 _pthread_mutex_lock(&sem_llock);
179 LIST_FOREACH(ni, &sem_list, next) {
180 if (ni->name != NULL && strcmp(name, ni->name) == 0) {
/* Re-open to verify the backing file still matches the cached entry. */
181 fd = _open(path, flags | O_RDWR | O_CLOEXEC |
183 if (fd == -1 || _fstat(fd, &sb) == -1) {
/* O_CREAT|O_EXCL on an existing semaphore, or a stale entry whose
 * backing file was replaced (dev/ino mismatch), is an error. */
187 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT |
188 O_EXCL) || ni->dev != sb.st_dev ||
189 ni->ino != sb.st_ino) {
196 _pthread_mutex_unlock(&sem_llock);
/* Slow path: allocate a list entry with the name stored inline. */
202 len = sizeof(*ni) + strlen(name) + 1;
203 ni = (struct sem_nameinfo *)malloc(len);
209 ni->name = (char *)(ni+1);
210 strcpy(ni->name, name);
/* O_EXLOCK serializes creation/initialization of the backing file. */
213 fd = _open(path, flags | O_RDWR | O_CLOEXEC | O_EXLOCK, mode);
214 if (fd == -1 || _fstat(fd, &sb) == -1)
/* A short file means we created it: write a freshly initialized
 * named semaphore image before mapping it. */
217 if (sb.st_size < sizeof(sem_t)) {
218 tmp._magic = SEM_MAGIC;
219 tmp._kern._count = value;
220 tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
221 if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp))
/* Map the semaphore shared so all openers see the same kernel word. */
225 sem = mmap(NULL, sizeof(sem_t), PROT_READ | PROT_WRITE,
226 MAP_SHARED | MAP_NOSYNC, fd, 0);
227 if (sem == MAP_FAILED) {
/* Reject files that do not contain a valid semaphore image. */
233 if (sem->_magic != SEM_MAGIC) {
241 LIST_INSERT_HEAD(&sem_list, ni, next);
243 _pthread_mutex_unlock(&sem_llock);
/* Error cleanup (labels elided): unmap and drop the lock. */
251 munmap(sem, sizeof(sem_t));
253 _pthread_mutex_unlock(&sem_llock);
/*
 * sem_close(3): release one reference to a named semaphore.  Only when
 * the last in-process reference is dropped is the entry unlinked from
 * sem_list and the mapping removed.  (Some lines, including the
 * non-named-semaphore error path, are elided in this view.)
 */
259 _sem_close(sem_t *sem)
261 struct sem_nameinfo *ni;
264 if (sem_check_validity(sem) != 0)
/* Only semaphores created via sem_open() may be closed. */
267 if (!(sem->_kern._flags & SEM_NAMED)) {
272 _pthread_once(&once, sem_module_init);
274 _pthread_mutex_lock(&sem_llock);
275 LIST_FOREACH(ni, &sem_list, next) {
276 if (sem == ni->sem) {
/* Drop one reference; remember whether it was the last. */
277 last = --ni->open_count == 0;
279 LIST_REMOVE(ni, next);
280 _pthread_mutex_unlock(&sem_llock);
282 munmap(sem, sizeof(*sem));
288 _pthread_mutex_unlock(&sem_llock);
/*
 * sem_unlink(3): remove a named semaphore's backing file.  Existing
 * mappings in processes that still have it open remain valid.
 */
294 _sem_unlink(const char *name)
/* POSIX requires the name to begin with a slash. */
298 if (name[0] != '/') {
/* Rebuild the same backing-file path used by _sem_open(). */
303 strcpy(path, SEM_PREFIX);
304 if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
305 errno = ENAMETOOLONG;
309 return (unlink(path));
/*
 * sem_destroy(3): tear down an unnamed semaphore.  Named semaphores
 * must be closed with sem_close() instead, so SEM_NAMED is rejected.
 * (The error-return and magic-clearing lines are elided in this view.)
 */
313 _sem_destroy(sem_t *sem)
316 if (sem_check_validity(sem) != 0)
319 if (sem->_kern._flags & SEM_NAMED) {
/*
 * sem_getvalue(3): report the current count.  USEM_COUNT masks off the
 * kernel flag bits (e.g. the has-waiters bit) from the raw word.
 */
328 _sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
331 if (sem_check_validity(sem) != 0)
334 *sval = (int)USEM_COUNT(sem->_kern._count);
/* Wake threads blocked in the kernel on this semaphore word. */
339 usem_wake(struct _usem2 *sem)
342 return (_umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL));
/*
 * Block in the kernel until the semaphore is posted, optionally with a
 * timeout on the given clock.  (The no-timeout branch and the
 * remaining-time computation are elided in this view.)
 */
346 usem_wait(struct _usem2 *sem, clockid_t clock_id, int flags,
347 const struct timespec *rqtp, struct timespec *rmtp)
350 struct _umtx_time timeout;
351 struct timespec remain;
/* Package the caller's timeout for the _umtx_op() system call. */
361 tms.timeout._clockid = clock_id;
362 tms.timeout._flags = (flags & TIMER_ABSTIME) ? UMTX_ABSTIME : 0;
363 tms.timeout._timeout = *rqtp;
365 tm_size = sizeof(tms);
367 retval = _umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, (void *)tm_size, tm_p);
/* For a relative timeout interrupted by a signal, report the time
 * remaining through rmtp (sem_clockwait_np semantics). */
368 if (retval == -1 && errno == EINTR && (flags & TIMER_ABSTIME) == 0 &&
369 rqtp != NULL && rmtp != NULL) {
/*
 * sem_trywait(3): lock-free attempt to decrement the count.  Retries
 * the CAS while the count is positive; the EAGAIN failure path is
 * elided in this view.
 */
377 _sem_trywait(sem_t *sem)
381 if (sem_check_validity(sem) != 0)
384 while (USEM_COUNT(val = sem->_kern._count) > 0) {
/* Acquire semantics: a successful decrement orders later reads. */
385 if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
/*
 * sem_clockwait_np(3): wait on a semaphore with a timeout measured
 * against an arbitrary clock.  Spins on a userspace CAS first and only
 * enters the kernel (usem_wait) when the count is exhausted.  This is
 * a cancellation point.  (The enclosing retry loop and several error
 * paths are elided in this view.)
 */
393 _sem_clockwait_np(sem_t * __restrict sem, clockid_t clock_id, int flags,
394 const struct timespec *rqtp, struct timespec *rmtp)
398 if (sem_check_validity(sem) != 0)
402 _pthread_testcancel();
/* Userspace fast path: try to take a token without a system call. */
404 while (USEM_COUNT(val = sem->_kern._count) > 0) {
405 if (atomic_cmpset_acq_int(&sem->_kern._count, val,
411 _pthread_testcancel();
416 * The timeout argument is only supposed to
417 * be checked if the thread would have blocked.
/* Validate the timespec only on the would-block path, per POSIX. */
420 if (rqtp->tv_nsec >= 1000000000 || rqtp->tv_nsec < 0) {
/* Enter the kernel wait with deferred cancellation enabled. */
425 _pthread_cancel_enter(1);
426 retval = usem_wait(&sem->_kern, clock_id, flags, rqtp, rmtp);
427 _pthread_cancel_leave(0);
/*
 * sem_timedwait(3): absolute-deadline wait on CLOCK_REALTIME,
 * implemented as a thin wrapper around _sem_clockwait_np().
 */
433 _sem_timedwait(sem_t * __restrict sem,
434 const struct timespec * __restrict abstime)
437 return (_sem_clockwait_np(sem, CLOCK_REALTIME, TIMER_ABSTIME, abstime,
/* sem_wait(3): an untimed wait is a timedwait with a NULL deadline. */
442 _sem_wait(sem_t *sem)
445 return (_sem_timedwait(sem, NULL));
450 * The sem_post() interface is reentrant with respect to signals and may be
451 * invoked from a signal-catching function.
452 * The implementation does not take any lock, so it is async-signal-safe.
/*
 * sem_post(3): lock-free increment of the count, with a kernel wakeup
 * only when the word carries the USEM_HAS_WAITERS flag.  (The overflow
 * error return and the loop's opening `do` are elided in this view.)
 */
455 _sem_post(sem_t *sem)
459 if (sem_check_validity(sem) != 0)
463 count = sem->_kern._count;
/* Refuse to push the count past SEM_VALUE_MAX (EOVERFLOW per POSIX). */
464 if (USEM_COUNT(count) + 1 > SEM_VALUE_MAX) {
/* Release semantics: prior writes become visible to the woken waiter. */
468 } while (!atomic_cmpset_rel_int(&sem->_kern._count, count, count + 1));
/* Only trap into the kernel if someone is actually blocked. */
469 if (count & USEM_HAS_WAITERS)
470 usem_wake(&sem->_kern);