2 * Implementation of SVID semaphores
4 * Author: Daniel Boulet
6 * This software is provided ``AS IS'' without any warranties of any kind.
9 * Copyright (c) 2003-2005 McAfee, Inc.
10 * All rights reserved.
12 * This software was developed for the FreeBSD Project in part by McAfee
13 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
14 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include "opt_compat.h"
43 #include "opt_sysvipc.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/eventhandler.h>
49 #include <sys/kernel.h>
52 #include <sys/module.h>
53 #include <sys/mutex.h>
55 #include <sys/syscall.h>
56 #include <sys/syscallsubr.h>
57 #include <sys/sysent.h>
58 #include <sys/sysctl.h>
60 #include <sys/malloc.h>
63 #include <security/mac/mac_framework.h>
/* Kernel malloc(9) type used for all SysV semaphore pool allocations. */
65 static MALLOC_DEFINE(M_SEM, "sem", "SVID compatible semaphores");
/*
 * Debug print helper.  NOTE(review): the expansion appears unconditional in
 * this excerpt; in the stock file it is normally guarded by a SEM_DEBUG
 * #ifdef -- confirm the guard was not lost when this chunk was extracted.
 */
68 #define DPRINTF(a) printf a
/* Forward declarations for the module's internal entry points. */
73 static void seminit(void);
74 static int sysvsem_modload(struct module *, int, void *);
75 static int semunload(void);
76 static void semexit_myhook(void *arg, struct proc *p);
77 static int sysctl_sema(SYSCTL_HANDLER_ARGS);
78 static int semvalid(int semid, struct semid_kernel *semakptr);
/* Syscall prototypes, only needed when sysproto.h did not supply them. */
80 #ifndef _SYS_SYSPROTO_H_
82 int __semctl(struct thread *td, struct __semctl_args *uap);
84 int semget(struct thread *td, struct semget_args *uap);
86 int semop(struct thread *td, struct semop_args *uap);
/* Helpers for the per-process SEM_UNDO bookkeeping. */
89 static struct sem_undo *semu_alloc(struct thread *td);
90 static int semundo_adjust(struct thread *td, struct sem_undo **supptr,
91 int semid, int semseq, int semnum, int adjval);
92 static void semundo_clear(int semid, int semnum);
/*
 * Global state.  sem_mtx protects allocation of semaphore sets (semget/
 * IPC_RMID paths); each set additionally has its own mutex in sema_mtx[].
 * sem_undo_mtx protects the undo lists below.
 */
94 static struct mtx sem_mtx; /* semaphore global lock */
95 static struct mtx sem_undo_mtx;
96 static int semtot = 0;
97 static struct semid_kernel *sema; /* semaphore id pool */
98 static struct mtx *sema_mtx; /* semaphore id pool mutexes*/
99 static struct sem *sem; /* semaphore pool */
100 LIST_HEAD(, sem_undo) semu_list; /* list of active undo structures */
101 LIST_HEAD(, sem_undo) semu_free_list; /* list of free undo structures */
102 static int *semu; /* undo structure pool */
103 static eventhandler_tag semexit_tag;
/*
 * Convenience wrappers around the undo-list mutex.  Note the trailing
 * semicolons inside the macro bodies; callers rely on that style.
 */
105 #define SEMUNDO_MTX sem_undo_mtx
106 #define SEMUNDO_LOCK() mtx_lock(&SEMUNDO_MTX);
107 #define SEMUNDO_UNLOCK() mtx_unlock(&SEMUNDO_MTX);
108 #define SEMUNDO_LOCKASSERT(how) mtx_assert(&SEMUNDO_MTX, (how));
/*
 * Per-semaphore state (fields of struct sem; the struct header is elided
 * from this excerpt).
 */
111 u_short semval; /* semaphore value */
112 pid_t sempid; /* pid of last operation */
113 u_short semncnt; /* # awaiting semval > cval */
114 u_short semzcnt; /* # awaiting semval = 0 */
118 * Undo structure (one per process)
/*
 * Fields of struct sem_undo (the struct header is elided here).  The
 * un_ent[1] member is a grown-at-allocation trailing array: SEMUSZ below
 * sizes each structure for SEMUME entries.
 */
121 LIST_ENTRY(sem_undo) un_next; /* ptr to next active undo structure */
122 struct proc *un_proc; /* owner of this structure */
123 short un_cnt; /* # of active entries */
125 short un_adjval; /* adjust on exit values */
126 short un_num; /* semaphore # */
127 int un_id; /* semid */
128 unsigned short un_seq;
129 } un_ent[1]; /* undo entries */
/*
 * Compile-time defaults for the tunables fetched in seminit().  Each is
 * overridable via kern.ipc.* (see the TUNABLE_INT_FETCH calls below).
 */
133 * Configuration parameters
136 #define SEMMNI 10 /* # of semaphore identifiers */
139 #define SEMMNS 60 /* # of semaphores in system */
142 #define SEMUME 10 /* max # of undo entries per process */
145 #define SEMMNU 30 /* # of undo structures in system */
148 /* shouldn't need tuning */
150 #define SEMMAP 30 /* # of entries in semaphore map */
153 #define SEMMSL SEMMNS /* max # of semaphores per id */
156 #define SEMOPM 100 /* max # of operations per semop call */
159 #define SEMVMX 32767 /* semaphore maximum value */
160 #define SEMAEM 16384 /* adjust on exit max value */
/* Round a byte count up to the next multiple of sizeof(long). */
163 * Due to the way semaphore memory is allocated, we have to ensure that
164 * SEMUSZ is properly aligned.
167 #define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1))
169 /* actual size of an undo structure */
170 #define SEMUSZ SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME]))
/*
 * Index into the flat semu pool by byte offset (expansion of the SEMU(ix)
 * macro; the #define line itself is elided from this excerpt).
 */
173 * Macro to find a particular sem_undo vector
176 ((struct sem_undo *)(((intptr_t)semu)+ix * seminfo.semusz))
/*
 * Runtime copy of the configuration; positional initialization must match
 * the field order of struct seminfo.  Exported read/write or read-tunable
 * via the kern.ipc sysctl tree directly below.
 */
179 * semaphore info struct
181 struct seminfo seminfo = {
182 SEMMAP, /* # of entries in semaphore map */
183 SEMMNI, /* # of semaphore identifiers */
184 SEMMNS, /* # of semaphores in system */
185 SEMMNU, /* # of undo structures in system */
186 SEMMSL, /* max # of semaphores per id */
187 SEMOPM, /* max # of operations per semop call */
188 SEMUME, /* max # of undo entries per process */
189 SEMUSZ, /* size in bytes of undo structure */
190 SEMVMX, /* semaphore maximum value */
191 SEMAEM /* adjust on exit max value */
/* RDTUN entries are boot-time tunables; RW entries may change at runtime. */
194 SYSCTL_INT(_kern_ipc, OID_AUTO, semmap, CTLFLAG_RW, &seminfo.semmap, 0,
195 "Number of entries in the semaphore map");
196 SYSCTL_INT(_kern_ipc, OID_AUTO, semmni, CTLFLAG_RDTUN, &seminfo.semmni, 0,
197 "Number of semaphore identifiers");
198 SYSCTL_INT(_kern_ipc, OID_AUTO, semmns, CTLFLAG_RDTUN, &seminfo.semmns, 0,
199 "Maximum number of semaphores in the system");
200 SYSCTL_INT(_kern_ipc, OID_AUTO, semmnu, CTLFLAG_RDTUN, &seminfo.semmnu, 0,
201 "Maximum number of undo structures in the system");
202 SYSCTL_INT(_kern_ipc, OID_AUTO, semmsl, CTLFLAG_RW, &seminfo.semmsl, 0,
203 "Max semaphores per id");
204 SYSCTL_INT(_kern_ipc, OID_AUTO, semopm, CTLFLAG_RDTUN, &seminfo.semopm, 0,
205 "Max operations per semop call");
206 SYSCTL_INT(_kern_ipc, OID_AUTO, semume, CTLFLAG_RDTUN, &seminfo.semume, 0,
207 "Max undo entries per process");
208 SYSCTL_INT(_kern_ipc, OID_AUTO, semusz, CTLFLAG_RDTUN, &seminfo.semusz, 0,
209 "Size in bytes of undo structure");
210 SYSCTL_INT(_kern_ipc, OID_AUTO, semvmx, CTLFLAG_RW, &seminfo.semvmx, 0,
211 "Semaphore maximum value");
212 SYSCTL_INT(_kern_ipc, OID_AUTO, semaem, CTLFLAG_RW, &seminfo.semaem, 0,
213 "Adjust on exit max value");
/* Dumps the raw sema[] array to userland; handled by sysctl_sema() below. */
214 SYSCTL_PROC(_kern_ipc, OID_AUTO, sema, CTLFLAG_RD,
215 NULL, 0, sysctl_sema, "", "");
/*
 * Body of seminit() (the function header, original lines 216-221, is elided
 * from this excerpt).  Order matters: fetch tunables first, then size the
 * pools from them, then initialize locks and lists, and finally register
 * the process-exit hook that applies SEM_UNDO adjustments.
 */
222 TUNABLE_INT_FETCH("kern.ipc.semmap", &seminfo.semmap);
223 TUNABLE_INT_FETCH("kern.ipc.semmni", &seminfo.semmni);
224 TUNABLE_INT_FETCH("kern.ipc.semmns", &seminfo.semmns);
225 TUNABLE_INT_FETCH("kern.ipc.semmnu", &seminfo.semmnu);
226 TUNABLE_INT_FETCH("kern.ipc.semmsl", &seminfo.semmsl);
227 TUNABLE_INT_FETCH("kern.ipc.semopm", &seminfo.semopm);
228 TUNABLE_INT_FETCH("kern.ipc.semume", &seminfo.semume);
229 TUNABLE_INT_FETCH("kern.ipc.semusz", &seminfo.semusz);
230 TUNABLE_INT_FETCH("kern.ipc.semvmx", &seminfo.semvmx);
231 TUNABLE_INT_FETCH("kern.ipc.semaem", &seminfo.semaem);
/* Allocate the four flat pools sized from the (possibly tuned) limits. */
233 sem = malloc(sizeof(struct sem) * seminfo.semmns, M_SEM, M_WAITOK);
234 sema = malloc(sizeof(struct semid_kernel) * seminfo.semmni, M_SEM,
236 sema_mtx = malloc(sizeof(struct mtx) * seminfo.semmni, M_SEM,
238 semu = malloc(seminfo.semmnu * seminfo.semusz, M_SEM, M_WAITOK);
/* Mark every id slot free (mode 0, no SEM_ALLOC) and init MAC labels. */
240 for (i = 0; i < seminfo.semmni; i++) {
241 sema[i].u.sem_base = 0;
242 sema[i].u.sem_perm.mode = 0;
243 sema[i].u.sem_perm.seq = 0;
245 mac_sysvsem_init(&sema[i]);
248 for (i = 0; i < seminfo.semmni; i++)
249 mtx_init(&sema_mtx[i], "semid", NULL, MTX_DEF);
/* All undo structures start on the free list. */
250 LIST_INIT(&semu_free_list);
251 for (i = 0; i < seminfo.semmnu; i++) {
252 struct sem_undo *suptr = SEMU(i);
253 suptr->un_proc = NULL;
254 LIST_INSERT_HEAD(&semu_free_list, suptr, un_next);
256 LIST_INIT(&semu_list);
257 mtx_init(&sem_mtx, "sem", NULL, MTX_DEF);
258 mtx_init(&sem_undo_mtx, "semu", NULL, MTX_DEF);
/* Apply per-process undo values when any process exits. */
259 semexit_tag = EVENTHANDLER_REGISTER(process_exit, semexit_myhook, NULL,
260 EVENTHANDLER_PRI_ANY);
/*
 * Body of semunload() (header and the in-use check, original lines
 * 261-271, are elided).  Tears down seminit()'s state in reverse order:
 * drop the exit hook, destroy MAC labels, then the per-id and global
 * mutexes.  NOTE(review): the free() calls for sem/sema/semu are among
 * the elided lines -- confirm against the full file.
 */
272 EVENTHANDLER_DEREGISTER(process_exit, semexit_tag);
274 for (i = 0; i < seminfo.semmni; i++)
275 mac_sysvsem_destroy(&sema[i]);
280 for (i = 0; i < seminfo.semmni; i++)
281 mtx_destroy(&sema_mtx[i]);
282 free(sema_mtx, M_SEM);
283 mtx_destroy(&sem_mtx);
284 mtx_destroy(&sem_undo_mtx);
/* Module event dispatcher; body (original lines 290-308) is elided. */
289 sysvsem_modload(struct module *module, int cmd, void *arg)
/*
 * Module glue: register the three SysV semaphore syscalls and hook the
 * module into the boot/load order at SI_SUB_SYSV_SEM.
 */
309 static moduledata_t sysvsem_mod = {
315 SYSCALL_MODULE_HELPER(__semctl);
316 SYSCALL_MODULE_HELPER(semget);
317 SYSCALL_MODULE_HELPER(semop);
319 DECLARE_MODULE(sysvsem, sysvsem_mod, SI_SUB_SYSV_SEM, SI_ORDER_FIRST);
320 MODULE_VERSION(sysvsem, 1);
323 * Allocate a new sem_undo structure for a process
324 * (returns ptr to structure or NULL if no more room)
/*
 * Pops the first free undo structure, moves it to the active list, and
 * claims it for the calling process.  Caller must hold sem_undo_mtx.
 * (The NULL-return path and closing brace are among the elided lines.)
 */
327 static struct sem_undo *
328 semu_alloc(struct thread *td)
330 struct sem_undo *suptr;
332 SEMUNDO_LOCKASSERT(MA_OWNED);
333 if ((suptr = LIST_FIRST(&semu_free_list)) == NULL)
335 LIST_REMOVE(suptr, un_next);
336 LIST_INSERT_HEAD(&semu_list, suptr, un_next);
338 suptr->un_proc = td->td_proc;
/*
 * Return an undo structure to the free list, but only when it has no
 * active entries left.  Caller must hold sem_undo_mtx.  (Function header
 * and return statements are elided from this excerpt.)
 */
343 semu_try_free(struct sem_undo *suptr)
346 SEMUNDO_LOCKASSERT(MA_OWNED);
348 if (suptr->un_cnt != 0)
350 LIST_REMOVE(suptr, un_next);
351 LIST_INSERT_HEAD(&semu_free_list, suptr, un_next);
356 * Adjust a particular entry for a particular proc
/*
 * Find (or create) the calling process's undo structure, then fold adjval
 * into the entry for (semid, semnum).  An entry whose accumulated value
 * reaches zero is deleted; a total outside +/-seminfo.semaem is rejected.
 * Caller must hold sem_undo_mtx.  Error-return lines and the closing brace
 * are elided from this excerpt.
 */
360 semundo_adjust(struct thread *td, struct sem_undo **supptr, int semid,
361 int semseq, int semnum, int adjval)
363 struct proc *p = td->td_proc;
364 struct sem_undo *suptr;
368 SEMUNDO_LOCKASSERT(MA_OWNED);
369 /* Look for and remember the sem_undo if the caller doesn't provide
/* Linear scan of active undo structures for this process's entry. */
374 LIST_FOREACH(suptr, &semu_list, un_next) {
375 if (suptr->un_proc == p) {
383 suptr = semu_alloc(td);
391 * Look for the requested entry and adjust it (delete if adjval becomes
394 sunptr = &suptr->un_ent[0];
395 for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
396 if (sunptr->un_id != semid || sunptr->un_num != semnum)
399 adjval += sunptr->un_adjval;
400 if (adjval > seminfo.semaem || adjval < -seminfo.semaem)
403 sunptr->un_adjval = adjval;
/* Deleting: overwrite the hole with the last entry, then maybe free. */
404 if (sunptr->un_adjval == 0) {
406 if (i < suptr->un_cnt)
408 suptr->un_ent[suptr->un_cnt];
409 if (suptr->un_cnt == 0)
410 semu_try_free(suptr);
415 /* Didn't find the right entry - create it */
418 if (adjval > seminfo.semaem || adjval < -seminfo.semaem)
/* Append a fresh entry if the per-process limit (semume) permits. */
420 if (suptr->un_cnt != seminfo.semume) {
421 sunptr = &suptr->un_ent[suptr->un_cnt];
423 sunptr->un_adjval = adjval;
424 sunptr->un_id = semid;
425 sunptr->un_num = semnum;
426 sunptr->un_seq = semseq;
/*
 * Drop all undo entries that reference semid (all of its semaphores when
 * semnum == -1, otherwise just that one), used when a set is removed or a
 * value is overwritten.  Caller must hold sem_undo_mtx.  The entry-removal
 * bookkeeping lines are partially elided from this excerpt.
 */
433 semundo_clear(int semid, int semnum)
435 struct sem_undo *suptr, *suptr1;
439 SEMUNDO_LOCKASSERT(MA_OWNED);
440 LIST_FOREACH_SAFE(suptr, &semu_list, un_next, suptr1) {
441 sunptr = &suptr->un_ent[0];
442 for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
443 if (sunptr->un_id != semid)
445 if (semnum == -1 || sunptr->un_num == semnum) {
447 if (i < suptr->un_cnt) {
449 suptr->un_ent[suptr->un_cnt];
452 semu_try_free(suptr);
/*
 * Validate that a semaphore set is still the one the caller's IPC id
 * refers to: allocated, and with a matching sequence number.  Returns
 * EINVAL on mismatch, 0 when valid.
 */
461 semvalid(int semid, struct semid_kernel *semakptr)
464 return ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
465 semakptr->u.sem_perm.seq != IPCID_TO_SEQ(semid) ? EINVAL : 0);
469 * Note that the user-mode half of this passes a union, not a pointer.
471 #ifndef _SYS_SYSPROTO_H_
472 struct __semctl_args {
/*
 * semctl(2) syscall stub: copy the semun union in from userland, forward
 * to kern_semctl(), and copy any IPC_STAT result back out.  The cmd
 * switch labels and error checks are partially elided from this excerpt.
 */
480 __semctl(struct thread *td, struct __semctl_args *uap)
482 struct semid_ds dsbuf;
483 union semun arg, semun;
494 error = copyin(uap->arg, &arg, sizeof(arg));
506 error = copyin(arg.buf, &dsbuf, sizeof(dsbuf));
513 semun.array = arg.array;
520 error = kern_semctl(td, uap->semid, uap->semnum, uap->cmd, &semun,
528 error = copyout(&dsbuf, arg.buf, sizeof(dsbuf));
533 td->td_retval[0] = rval;
/*
 * Kernel-side semctl(2) implementation: validates the id, takes the
 * per-set mutex, and dispatches on cmd.  The switch statement's case
 * labels, goto targets, and most break statements are elided from this
 * excerpt; the surviving lines below are grouped per command.
 */
538 kern_semctl(struct thread *td, int semid, int semnum, int cmd,
539 union semun *arg, register_t *rval)
542 struct ucred *cred = td->td_ucred;
544 struct semid_ds *sbuf;
545 struct semid_kernel *semakptr;
546 struct mtx *sema_mtxp;
547 u_short usval, count;
550 DPRINTF(("call to semctl(%d, %d, %d, 0x%p)\n",
551 semid, semnum, cmd, arg));
/* SysV IPC may be disabled per-jail. */
552 if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
/* Presumably the SEM_STAT path: semid is a raw array index here. */
560 * For this command we assume semid is an array index
561 * rather than an IPC id.
563 if (semid < 0 || semid >= seminfo.semmni)
565 semakptr = &sema[semid];
566 sema_mtxp = &sema_mtx[semid];
568 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
572 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
575 error = mac_sysvsem_check_semctl(cred, semakptr, cmd);
579 bcopy(&semakptr->u, arg->buf, sizeof(struct semid_ds));
580 *rval = IXSEQ_TO_IPCID(semid, semakptr->u.sem_perm);
581 mtx_unlock(sema_mtxp);
/* All other commands: semid is an IPC id; convert to an index. */
585 semidx = IPCID_TO_IX(semid);
586 if (semidx < 0 || semidx >= seminfo.semmni)
589 semakptr = &sema[semidx];
590 sema_mtxp = &sema_mtx[semidx];
595 error = mac_sysvsem_check_semctl(cred, semakptr, cmd);
/* IPC_RMID: requires IPC_M; clears the slot and all undo entries. */
605 if ((error = semvalid(semid, semakptr)) != 0)
607 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_M)))
609 semakptr->u.sem_perm.cuid = cred->cr_uid;
610 semakptr->u.sem_perm.uid = cred->cr_uid;
611 semakptr->u.sem_perm.mode = 0;
613 semundo_clear(semidx, -1);
616 mac_sysvsem_cleanup(semakptr);
/*
 * Compact the flat sem[] pool: lock every set whose base lies above
 * the removed one (LOP_DUPOK since we already hold one set mutex),
 * slide the semaphores down, then rebase and unlock those sets.
 */
619 for (i = 0; i < seminfo.semmni; i++) {
620 if ((sema[i].u.sem_perm.mode & SEM_ALLOC) &&
621 sema[i].u.sem_base > semakptr->u.sem_base)
622 mtx_lock_flags(&sema_mtx[i], LOP_DUPOK);
624 for (i = semakptr->u.sem_base - sem; i < semtot; i++)
625 sem[i] = sem[i + semakptr->u.sem_nsems];
626 for (i = 0; i < seminfo.semmni; i++) {
627 if ((sema[i].u.sem_perm.mode & SEM_ALLOC) &&
628 sema[i].u.sem_base > semakptr->u.sem_base) {
629 sema[i].u.sem_base -= semakptr->u.sem_nsems;
630 mtx_unlock(&sema_mtx[i]);
633 semtot -= semakptr->u.sem_nsems;
/* IPC_SET: owner/group and the low 9 permission bits only. */
637 if ((error = semvalid(semid, semakptr)) != 0)
639 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_M)))
642 semakptr->u.sem_perm.uid = sbuf->sem_perm.uid;
643 semakptr->u.sem_perm.gid = sbuf->sem_perm.gid;
644 semakptr->u.sem_perm.mode = (semakptr->u.sem_perm.mode &
645 ~0777) | (sbuf->sem_perm.mode & 0777);
646 semakptr->u.sem_ctime = time_second;
/* IPC_STAT: copy the kernel semid_ds out through arg->buf. */
650 if ((error = semvalid(semid, semakptr)) != 0)
652 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
654 bcopy(&semakptr->u, arg->buf, sizeof(struct semid_ds));
/* GETNCNT: number of waiters for this semaphore to increase. */
658 if ((error = semvalid(semid, semakptr)) != 0)
660 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
662 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
666 *rval = semakptr->u.sem_base[semnum].semncnt;
/* GETPID: pid of the last process to operate on this semaphore. */
670 if ((error = semvalid(semid, semakptr)) != 0)
672 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
674 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
678 *rval = semakptr->u.sem_base[semnum].sempid;
/* GETVAL: current value of one semaphore. */
682 if ((error = semvalid(semid, semakptr)) != 0)
684 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
686 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
690 *rval = semakptr->u.sem_base[semnum].semval;
/* GETALL: snapshot all values via a temporary kernel array. */
695 * Unfortunately, callers of this function don't know
696 * in advance how many semaphores are in this set.
697 * While we could just allocate the maximum size array
698 * and pass the actual size back to the caller, that
699 * won't work for SETALL since we can't copyin() more
700 * data than the user specified as we may return a
703 * Note that the number of semaphores in a set is
704 * fixed for the life of that set. The only way that
705 * the 'count' could change while are blocked in
706 * malloc() is if this semaphore set were destroyed
707 * and a new one created with the same index.
708 * However, semvalid() will catch that due to the
709 * sequence number unless exactly 0x8000 (or a
710 * multiple thereof) semaphore sets for the same index
711 * are created and destroyed while we are in malloc!
/* Drop the set lock across the sleeping malloc, then revalidate. */
714 count = semakptr->u.sem_nsems;
715 mtx_unlock(sema_mtxp);
716 array = malloc(sizeof(*array) * count, M_TEMP, M_WAITOK);
718 if ((error = semvalid(semid, semakptr)) != 0)
720 KASSERT(count == semakptr->u.sem_nsems, ("nsems changed"));
721 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
723 for (i = 0; i < semakptr->u.sem_nsems; i++)
724 array[i] = semakptr->u.sem_base[i].semval;
725 mtx_unlock(sema_mtxp);
726 error = copyout(array, arg->array, count * sizeof(*array));
/* GETZCNT: number of waiters for this semaphore to reach zero. */
731 if ((error = semvalid(semid, semakptr)) != 0)
733 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_R)))
735 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
739 *rval = semakptr->u.sem_base[semnum].semzcnt;
/* SETVAL: bounds-check against semvmx, then invalidate related undos. */
743 if ((error = semvalid(semid, semakptr)) != 0)
745 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_W)))
747 if (semnum < 0 || semnum >= semakptr->u.sem_nsems) {
751 if (arg->val < 0 || arg->val > seminfo.semvmx) {
755 semakptr->u.sem_base[semnum].semval = arg->val;
757 semundo_clear(semidx, semnum);
/* SETALL: copyin new values unlocked, then apply under the set lock. */
764 * See comment on GETALL for why 'count' shouldn't change
765 * and why we require a userland buffer.
767 count = semakptr->u.sem_nsems;
768 mtx_unlock(sema_mtxp);
769 array = malloc(sizeof(*array) * count, M_TEMP, M_WAITOK);
770 error = copyin(arg->array, array, count * sizeof(*array));
774 if ((error = semvalid(semid, semakptr)) != 0)
776 KASSERT(count == semakptr->u.sem_nsems, ("nsems changed"));
777 if ((error = ipcperm(td, &semakptr->u.sem_perm, IPC_W)))
779 for (i = 0; i < semakptr->u.sem_nsems; i++) {
781 if (usval > seminfo.semvmx) {
785 semakptr->u.sem_base[i].semval = usval;
788 semundo_clear(semidx, -1);
/* Common exit paths: release whichever lock is still held. */
799 mtx_unlock(sema_mtxp);
801 mtx_unlock(&sem_mtx);
807 #ifndef _SYS_SYSPROTO_H_
/*
 * semget(2): look up an existing set by key, or allocate a new one when
 * key is IPC_PRIVATE or IPC_CREAT is given.  Runs under the global
 * sem_mtx.  Several error-return and goto lines are elided from this
 * excerpt.
 */
815 semget(struct thread *td, struct semget_args *uap)
817 int semid, error = 0;
819 int nsems = uap->nsems;
820 int semflg = uap->semflg;
821 struct ucred *cred = td->td_ucred;
823 DPRINTF(("semget(0x%x, %d, 0%o)\n", key, nsems, semflg));
824 if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
/* Search the id table for an allocated set with a matching key. */
828 if (key != IPC_PRIVATE) {
829 for (semid = 0; semid < seminfo.semmni; semid++) {
830 if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) &&
831 sema[semid].u.sem_perm.key == key)
834 if (semid < seminfo.semmni) {
835 DPRINTF(("found public key\n"));
836 if ((error = ipcperm(td, &sema[semid].u.sem_perm,
/* Existing set must be large enough for the caller's nsems. */
840 if (nsems > 0 && sema[semid].u.sem_nsems < nsems) {
841 DPRINTF(("too small\n"));
/* IPC_CREAT|IPC_EXCL on an existing key is an error (EEXIST). */
845 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
846 DPRINTF(("not exclusive\n"));
851 error = mac_sysvsem_check_semget(cred, &sema[semid]);
/* Allocation path: validate limits, find a free slot, claim it. */
859 DPRINTF(("need to allocate the semid_kernel\n"));
860 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
861 if (nsems <= 0 || nsems > seminfo.semmsl) {
862 DPRINTF(("nsems out of range (0<%d<=%d)\n", nsems,
867 if (nsems > seminfo.semmns - semtot) {
869 "not enough semaphores left (need %d, got %d)\n",
870 nsems, seminfo.semmns - semtot));
874 for (semid = 0; semid < seminfo.semmni; semid++) {
875 if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0)
878 if (semid == seminfo.semmni) {
879 DPRINTF(("no more semid_kernel's available\n"));
883 DPRINTF(("semid %d is available\n", semid));
884 mtx_lock(&sema_mtx[semid]);
885 KASSERT((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0,
886 ("Lost semaphore %d", semid));
887 sema[semid].u.sem_perm.key = key;
888 sema[semid].u.sem_perm.cuid = cred->cr_uid;
889 sema[semid].u.sem_perm.uid = cred->cr_uid;
890 sema[semid].u.sem_perm.cgid = cred->cr_gid;
891 sema[semid].u.sem_perm.gid = cred->cr_gid;
892 sema[semid].u.sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
/* Bump the 15-bit sequence number so stale IPC ids fail semvalid(). */
893 sema[semid].u.sem_perm.seq =
894 (sema[semid].u.sem_perm.seq + 1) & 0x7fff;
895 sema[semid].u.sem_nsems = nsems;
896 sema[semid].u.sem_otime = 0;
897 sema[semid].u.sem_ctime = time_second;
/* Carve the new set out of the tail of the flat sem[] pool. */
898 sema[semid].u.sem_base = &sem[semtot];
900 bzero(sema[semid].u.sem_base,
901 sizeof(sema[semid].u.sem_base[0])*nsems);
903 mac_sysvsem_create(cred, &sema[semid]);
905 mtx_unlock(&sema_mtx[semid]);
906 DPRINTF(("sembase = %p, next = %p\n",
907 sema[semid].u.sem_base, &sem[semtot]));
909 DPRINTF(("didn't find it and wasn't asked to create it\n"));
/* Success: hand the IPC id (index + sequence) back to userland. */
915 td->td_retval[0] = IXSEQ_TO_IPCID(semid, sema[semid].u.sem_perm);
917 mtx_unlock(&sem_mtx);
921 #ifndef _SYS_SYSPROTO_H_
/*
 * semop(2): atomically apply a vector of operations to one semaphore set,
 * sleeping and rolling back partial progress whenever an operation cannot
 * complete.  Many error-return, goto, and brace lines are elided from this
 * excerpt; the surviving lines are annotated per phase.
 */
929 semop(struct thread *td, struct semop_args *uap)
932 struct sembuf small_sops[SMALL_SOPS];
933 int semid = uap->semid;
934 size_t nsops = uap->nsops;
936 struct semid_kernel *semakptr;
937 struct sembuf *sopptr = 0;
938 struct sem *semptr = 0;
939 struct sem_undo *suptr;
940 struct mtx *sema_mtxp;
943 int do_wakeup, do_undos;
949 DPRINTF(("call to semop(%d, %p, %u)\n", semid, sops, nsops));
951 if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
954 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
956 if (semid < 0 || semid >= seminfo.semmni)
/* Small vectors use the on-stack buffer; larger ones up to semopm heap. */
959 /* Allocate memory for sem_ops */
960 if (nsops <= SMALL_SOPS)
962 else if (nsops <= seminfo.semopm)
963 sops = malloc(nsops * sizeof(*sops), M_TEMP, M_WAITOK);
965 DPRINTF(("too many sops (max=%d, nsops=%d)\n", seminfo.semopm,
969 if ((error = copyin(uap->sops, sops, nsops * sizeof(sops[0]))) != 0) {
970 DPRINTF(("error = %d from copyin(%p, %p, %d)\n", error,
971 uap->sops, sops, nsops * sizeof(sops[0])));
972 if (sops != small_sops)
977 semakptr = &sema[semid];
978 sema_mtxp = &sema_mtx[semid];
980 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) {
/* Remember the sequence so a destroy/recreate during sleep is caught. */
984 seq = semakptr->u.sem_perm.seq;
985 if (seq != IPCID_TO_SEQ(uap->semid)) {
990 * Initial pass thru sops to see what permissions are needed.
991 * Also perform any checks that don't need repeating on each
992 * attempt to satisfy the request vector.
994 j = 0; /* permission needed */
996 for (i = 0; i < nsops; i++) {
998 if (sopptr->sem_num >= semakptr->u.sem_nsems) {
1002 if (sopptr->sem_flg & SEM_UNDO && sopptr->sem_op != 0)
1004 j |= (sopptr->sem_op == 0) ? SEM_R : SEM_A;
1007 if ((error = ipcperm(td, &semakptr->u.sem_perm, j))) {
1008 DPRINTF(("error = %d from ipaccess\n", error));
1012 error = mac_sysvsem_check_semop(td->td_ucred, semakptr, j);
1018 * Loop trying to satisfy the vector of requests.
1019 * If we reach a point where we must wait, any requests already
1020 * performed are rolled back and we go to sleep until some other
1021 * process wakes us up. At this point, we start all over again.
1023 * This ensures that from the perspective of other tasks, a set
1024 * of requests is atomic (never partially satisfied).
1028 error = 0; /* error return if necessary */
1030 for (i = 0; i < nsops; i++) {
1032 semptr = &semakptr->u.sem_base[sopptr->sem_num];
1035 "semop: semakptr=%p, sem_base=%p, "
1036 "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
1037 semakptr, semakptr->u.sem_base, semptr,
1038 sopptr->sem_num, semptr->semval, sopptr->sem_op,
1039 (sopptr->sem_flg & IPC_NOWAIT) ?
1040 "nowait" : "wait"));
/* Negative op: decrement, or stop here if it would go below zero. */
1042 if (sopptr->sem_op < 0) {
1043 if (semptr->semval + sopptr->sem_op < 0) {
1044 DPRINTF(("semop: can't do it now\n"));
1047 semptr->semval += sopptr->sem_op;
1048 if (semptr->semval == 0 &&
1049 semptr->semzcnt > 0)
/* Zero op: succeeds only while the semaphore is exactly zero. */
1052 } else if (sopptr->sem_op == 0) {
1053 if (semptr->semval != 0) {
1054 DPRINTF(("semop: not zero now\n"));
/* Positive op: would exceeding semvmx is rejected (check elided). */
1057 } else if (semptr->semval + sopptr->sem_op >
/* An up on a semaphore with waiters schedules a wakeup. */
1062 if (semptr->semncnt > 0)
1064 semptr->semval += sopptr->sem_op;
1069 * Did we get through the entire vector?
1075 * No ... rollback anything that we've already done
1077 DPRINTF(("semop: rollback 0 through %d\n", i-1));
1078 for (j = 0; j < i; j++)
1079 semakptr->u.sem_base[sops[j].sem_num].semval -=
1082 /* If we detected an error, return it */
1087 * If the request that we couldn't satisfy has the
1088 * NOWAIT flag set then return with EAGAIN.
1090 if (sopptr->sem_flg & IPC_NOWAIT) {
/* Register as a waiter: semzcnt for zero-ops, semncnt otherwise. */
1095 if (sopptr->sem_op == 0)
1100 DPRINTF(("semop: good night!\n"))
1101 error = msleep(semakptr, sema_mtxp, (PZERO - 4) | PCATCH,
1103 DPRINTF(("semop: good morning (error=%d)!\n", error));
1104 /* return code is checked below, after sem[nz]cnt-- */
1107 * Make sure that the semaphore still exists
1109 seq = semakptr->u.sem_perm.seq;
1110 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
1111 seq != IPCID_TO_SEQ(uap->semid)) {
1117 * Renew the semaphore's pointer after wakeup since
1118 * during msleep sem_base may have been modified and semptr
1119 * is not valid any more
1121 semptr = &semakptr->u.sem_base[sopptr->sem_num];
1124 * The semaphore is still alive. Readjust the count of
1125 * waiting processes.
1127 if (sopptr->sem_op == 0)
1133 * Is it really morning, or was our sleep interrupted?
1134 * (Delayed check of msleep() return code because we
1135 * need to decrement sem[nz]cnt either way.)
1141 DPRINTF(("semop: good morning!\n"));
1146 * Process any SEM_UNDO requests.
1151 for (i = 0; i < nsops; i++) {
1153 * We only need to deal with SEM_UNDO's for non-zero
1158 if ((sops[i].sem_flg & SEM_UNDO) == 0)
/* Record the inverse adjustment so process exit undoes this op. */
1160 adjval = sops[i].sem_op;
1163 error = semundo_adjust(td, &suptr, semid, seq,
1164 sops[i].sem_num, -adjval);
1169 * Oh-Oh! We ran out of either sem_undo's or undo's.
1170 * Rollback the adjustments to this point and then
1171 * rollback the semaphore ups and down so we can return
1172 * with an error with all structures restored. We
1173 * rollback the undo's in the exact reverse order that
1174 * we applied them. This guarantees that we won't run
1175 * out of space as we roll things back out.
1177 for (j = 0; j < i; j++) {
1179 if ((sops[k].sem_flg & SEM_UNDO) == 0)
1181 adjval = sops[k].sem_op;
1184 if (semundo_adjust(td, &suptr, semid, seq,
1185 sops[k].sem_num, adjval) != 0)
1186 panic("semop - can't undo undos");
1189 for (j = 0; j < nsops; j++)
1190 semakptr->u.sem_base[sops[j].sem_num].semval -=
1193 DPRINTF(("error = %d from semundo_adjust\n", error));
1196 } /* loop through the sops */
1198 } /* if (do_undos) */
1200 /* We're definitely done - set the sempid's and time */
1201 for (i = 0; i < nsops; i++) {
1203 semptr = &semakptr->u.sem_base[sopptr->sem_num];
1204 semptr->sempid = td->td_proc->p_pid;
1206 semakptr->u.sem_otime = time_second;
1209 * Do a wakeup if any semaphore was up'd whilst something was
1213 DPRINTF(("semop: doing wakeup\n"));
1215 DPRINTF(("semop: back from wakeup\n"));
1217 DPRINTF(("semop: done\n"));
1218 td->td_retval[0] = 0;
1220 mtx_unlock(sema_mtxp);
1221 if (sops != small_sops)
1227 * Go through the undo structures for this process and apply the adjustments to
/*
 * process_exit eventhandler: find the exiting process's undo structure,
 * apply each recorded adjustment (clamping at zero for negative values),
 * and return the structure to the free list.  Lock-acquisition and some
 * brace lines are elided from this excerpt.
 */
1231 semexit_myhook(void *arg, struct proc *p)
1233 struct sem_undo *suptr;
1234 struct semid_kernel *semakptr;
1235 struct mtx *sema_mtxp;
1236 int semid, semnum, adjval, ix;
1240 * Go through the chain of undo vectors looking for one
1241 * associated with this process.
1244 LIST_FOREACH(suptr, &semu_list, un_next) {
1245 if (suptr->un_proc == p)
1248 if (suptr == NULL) {
1252 LIST_REMOVE(suptr, un_next);
1254 DPRINTF(("proc @%p has undo structure with %d entries\n", p,
1258 * If there are any active undo elements then process them.
1260 if (suptr->un_cnt > 0) {
1262 for (ix = 0; ix < suptr->un_cnt; ix++) {
1263 semid = suptr->un_ent[ix].un_id;
1264 semnum = suptr->un_ent[ix].un_num;
1265 adjval = suptr->un_ent[ix].un_adjval;
1266 seq = suptr->un_ent[ix].un_seq;
1267 semakptr = &sema[semid];
1268 sema_mtxp = &sema_mtx[semid];
1270 mtx_lock(sema_mtxp);
/* Skip sets destroyed (or recycled) since the undo was recorded. */
1271 if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 ||
1272 (semakptr->u.sem_perm.seq != seq)) {
1273 mtx_unlock(sema_mtxp);
1276 if (semnum >= semakptr->u.sem_nsems)
1277 panic("semexit - semnum out of range");
1280 "semexit: %p id=%d num=%d(adj=%d) ; sem=%d\n",
1281 suptr->un_proc, suptr->un_ent[ix].un_id,
1282 suptr->un_ent[ix].un_num,
1283 suptr->un_ent[ix].un_adjval,
1284 semakptr->u.sem_base[semnum].semval));
/* A negative adjustment never drives the value below zero. */
1286 if (adjval < 0 && semakptr->u.sem_base[semnum].semval <
1288 semakptr->u.sem_base[semnum].semval = 0;
1290 semakptr->u.sem_base[semnum].semval += adjval;
1293 DPRINTF(("semexit: back from wakeup\n"));
1294 mtx_unlock(sema_mtxp);
1300 * Deallocate the undo vector.
1302 DPRINTF(("removing vector\n"));
1303 suptr->un_proc = NULL;
1305 LIST_INSERT_HEAD(&semu_free_list, suptr, un_next);
/*
 * Handler for the kern.ipc.sema sysctl: copies the entire sema[] id table
 * out to userland.  (Function header and closing brace are elided.)
 */
1310 sysctl_sema(SYSCTL_HANDLER_ARGS)
1313 return (SYSCTL_OUT(req, sema,
1314 sizeof(struct semid_kernel) * seminfo.semmni));
/*
 * Backward-compatibility shims for FreeBSD 4-7 binaries: the old
 * multiplexed semsys(2) entry point and the pre-8 __semctl using the old
 * semid_ds layout.  Large parts of both functions are elided from this
 * excerpt.
 */
1317 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
1318 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
1319 SYSCALL_MODULE_HELPER(semsys);
1320 SYSCALL_MODULE_HELPER(freebsd7___semctl);
1322 /* XXX casting to (sy_call_t *) is bogus, as usual. */
1323 static sy_call_t *semcalls[] = {
1324 (sy_call_t *)freebsd7___semctl, (sy_call_t *)semget,
1329 * Entry point for all SEM calls.
1334 /* XXX actually varargs. */
1335 struct semsys_args /* {
/* Dispatch: bounds-check uap->which, then call through semcalls[]. */
1345 if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
1347 if (uap->which < 0 ||
1348 uap->which >= sizeof(semcalls)/sizeof(semcalls[0]))
1350 error = (*semcalls[uap->which])(td, &uap->a2);
/* Field-copy helper shared by the old<->new semid_ds conversions. */
1354 #define CP(src, dst, fld) do { (dst).fld = (src).fld; } while (0)
1356 #ifndef _SYS_SYSPROTO_H_
1357 struct freebsd7___semctl_args {
1361 union semun_old *arg;
/*
 * Old-ABI semctl: convert semid_ds_old <-> semid_ds around a call to
 * kern_semctl().  Switch labels and error checks are partially elided.
 */
1365 freebsd7___semctl(struct thread *td, struct freebsd7___semctl_args *uap)
1367 struct semid_ds_old dsold;
1368 struct semid_ds dsbuf;
1369 union semun_old arg;
1381 error = copyin(uap->arg, &arg, sizeof(arg));
/* Inbound (IPC_SET): widen the old structure into the current layout. */
1393 error = copyin(arg.buf, &dsold, sizeof(dsold));
1396 ipcperm_old2new(&dsold.sem_perm, &dsbuf.sem_perm);
1397 CP(dsold, dsbuf, sem_base);
1398 CP(dsold, dsbuf, sem_nsems);
1399 CP(dsold, dsbuf, sem_otime);
1400 CP(dsold, dsbuf, sem_ctime);
1405 semun.array = arg.array;
1408 semun.val = arg.val;
1412 error = kern_semctl(td, uap->semid, uap->semnum, uap->cmd, &semun,
/* Outbound (IPC_STAT): narrow the result back to the old layout. */
1420 bzero(&dsold, sizeof(dsold));
1421 ipcperm_new2old(&dsbuf.sem_perm, &dsold.sem_perm);
1422 CP(dsbuf, dsold, sem_base);
1423 CP(dsbuf, dsold, sem_nsems);
1424 CP(dsbuf, dsold, sem_otime);
1425 CP(dsbuf, dsold, sem_ctime);
1426 error = copyout(&dsold, arg.buf, sizeof(dsold));
1431 td->td_retval[0] = rval;
1437 #endif /* COMPAT_FREEBSD4 || COMPAT_FREEBSD5 || COMPAT_FREEBSD6 ||