/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
 */

/*
 * Thread locking implementation for the dynamic linker.
 *
 * We use the "simple, non-scalable reader-preference lock" from:
 *
 *   J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
 *   Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
 *   Principles and Practice of Parallel Programming, April 1991.
 *
 * In this algorithm the lock is a single word.  Its low-order bit is
 * set when a writer holds the lock.  The remaining high-order bits
 * contain a count of readers desiring the lock.  The algorithm requires
 * atomic "compare_and_store" and "add" operations, which we take
 * from machine/atomic.h.
 */

#include <sys/param.h>
#include <signal.h>
#include <stdlib.h>
#include <time.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"

void _rtld_thread_init(struct RtldLockInfo *) __exported;
void _rtld_atfork_pre(int *) __exported;
void _rtld_atfork_post(int *) __exported;

#define WAFLAG		0x1	/* A writer holds the lock */
#define RC_INCR		0x2	/* Adjusts count of readers desiring lock */

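/*
 * Worked example (illustrative only): with the values above, the reader
 * count occupies the bits above WAFLAG, so both fields fit in one word
 * and can be updated with the atomic operations named in the header
 * comment without corrupting each other.
 *
 *	lock = 3 * RC_INCR + WAFLAG;	// three readers wait on a writer
 *	readers = lock / RC_INCR;	// 3
 *	writer = lock & WAFLAG;		// 1
 */
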
typedef struct Struct_Lock {
	volatile u_int lock;
	void *base;
} Lock;

static sigset_t fullsigmask, oldsigmask;
static int thread_flag, wnested;

static void *
def_lock_create(void)
{
	void *base;
	char *p;
	uintptr_t r;
	Lock *l;

	/*
	 * Arrange for the lock to occupy its own cache line.  First, we
	 * optimistically allocate just a cache line, hoping that malloc
	 * will give us a well-aligned block of memory.  If that doesn't
	 * work, we allocate a larger block and take a well-aligned cache
	 * line from it.
	 */
	base = xmalloc(CACHE_LINE_SIZE);
	p = base;
	if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
		free(base);
		base = xmalloc(2 * CACHE_LINE_SIZE);
		p = base;
		if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
			p += CACHE_LINE_SIZE - r;
	}
	l = (Lock *)p;
	l->base = base;
	l->lock = 0;
	return (l);
}

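/*
 * Alignment sketch (hypothetical numbers, assuming CACHE_LINE_SIZE is
 * 64): if the second xmalloc() returns 0x800418, then r == 0x18 and p
 * is advanced by 0x40 - 0x18 == 0x28 bytes to 0x800440, the next
 * cache-line boundary.  The original, unaligned pointer is kept in
 * l->base so that def_lock_destroy() can free() it.
 */
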
static void
def_lock_destroy(void *lock)
{
	Lock *l = (Lock *)lock;

	free(l->base);
}

static void
def_rlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;

	atomic_add_acq_int(&l->lock, RC_INCR);
	while (l->lock & WAFLAG)
		;	/* Spin until the writer clears WAFLAG. */
}

static void
def_wlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;
	sigset_t tmp_oldsigmask;

	for (;;) {
		/*
		 * Block signals before taking the write lock, so that a
		 * signal handler cannot reenter rtld while it is held.
		 */
		sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
			break;
		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
	}
	/* Save the pre-lock mask only for the outermost write lock. */
	if (atomic_fetchadd_int(&wnested, 1) == 0)
		oldsigmask = tmp_oldsigmask;
}

static void
def_lock_release(void *lock)
{
	Lock *l = (Lock *)lock;

	if ((l->lock & WAFLAG) == 0)
		atomic_add_rel_int(&l->lock, -RC_INCR);
	else {
		assert(wnested > 0);
		atomic_add_rel_int(&l->lock, -WAFLAG);
		if (atomic_fetchadd_int(&wnested, -1) == 1)
			sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	}
}

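/*
 * Nesting sketch (illustrative only): wnested lets several write locks
 * share one saved signal mask, so signals stay blocked until the
 * outermost write lock is dropped.
 *
 *	void *l1 = def_lock_create();
 *	void *l2 = def_lock_create();
 *
 *	def_wlock_acquire(l1);	// wnested 0 -> 1, saves oldsigmask
 *	def_wlock_acquire(l2);	// wnested 1 -> 2, mask stays blocked
 *	def_lock_release(l2);	// wnested 2 -> 1, mask stays blocked
 *	def_lock_release(l1);	// wnested 1 -> 0, restores oldsigmask
 */
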
static int
def_thread_set_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag |= mask;
	return (old_val);
}

static int
def_thread_clr_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag &= ~mask;
	return (old_val);
}

/*
 * Public interface exposed to the rest of the dynamic linker.
 */
static struct RtldLockInfo lockinfo;
static struct RtldLockInfo deflockinfo;

static __inline int
thread_mask_set(int mask)
{
	return (lockinfo.thread_set_flag(mask));
}

static __inline void
thread_mask_clear(int mask)
{
	lockinfo.thread_clr_flag(mask);
}

#define RTLD_LOCK_CNT	3
static struct rtld_lock {
	void	*handle;
	int	 mask;
} rtld_locks[RTLD_LOCK_CNT];

rtld_lock_t	rtld_bind_lock = &rtld_locks[0];
rtld_lock_t	rtld_libc_lock = &rtld_locks[1];
rtld_lock_t	rtld_phdr_lock = &rtld_locks[2];

void
rlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("rlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.rlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_RLOCKED;
}

void
wlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("wlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.wlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_WLOCKED;
}

void
lock_release(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
	case RTLD_LOCK_WLOCKED:
		thread_mask_clear(lock->mask);
		lockinfo.lock_release(lock->handle);
		break;
	default:
		assert(0);
	}
}

void
lock_upgrade(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	lock_release(lock, lockstate);
	wlock_acquire(lock, lockstate);
}

void
lock_restart_for_upgrade(RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
	case RTLD_LOCK_WLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
		siglongjmp(lockstate->env, 1);
		break;
	default:
		assert(0);
	}
}

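/*
 * Usage sketch (illustrative, not a copy of any particular caller): a
 * reader that may later need to upgrade records a restart point first,
 * so that lock_restart_for_upgrade() can unwind it back to the point
 * where the write lock is taken from scratch.
 *
 *	RtldLockState ls;
 *
 *	rlock_acquire(rtld_bind_lock, &ls);
 *	if (sigsetjmp(ls.env, 0) != 0)
 *		lock_upgrade(rtld_bind_lock, &ls);
 *	// ... read-side work; on discovering that shared state must be
 *	// modified, call lock_restart_for_upgrade(&ls) to jump back ...
 */
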
void
lockdflt_init(void)
{
	int i;

	deflockinfo.rtli_version = RTLI_VERSION;
	deflockinfo.lock_create = def_lock_create;
	deflockinfo.lock_destroy = def_lock_destroy;
	deflockinfo.rlock_acquire = def_rlock_acquire;
	deflockinfo.wlock_acquire = def_wlock_acquire;
	deflockinfo.lock_release = def_lock_release;
	deflockinfo.thread_set_flag = def_thread_set_flag;
	deflockinfo.thread_clr_flag = def_thread_clr_flag;
	deflockinfo.at_fork = NULL;

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].mask = (1 << i);
		rtld_locks[i].handle = NULL;
	}

	memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo));
	_rtld_thread_init(NULL);
	/*
	 * Construct a mask to block all signals except traps which might
	 * conceivably be generated within the dynamic linker itself.
	 */
	sigfillset(&fullsigmask);
	sigdelset(&fullsigmask, SIGILL);
	sigdelset(&fullsigmask, SIGTRAP);
	sigdelset(&fullsigmask, SIGABRT);
	sigdelset(&fullsigmask, SIGEMT);
	sigdelset(&fullsigmask, SIGFPE);
	sigdelset(&fullsigmask, SIGBUS);
	sigdelset(&fullsigmask, SIGSEGV);
	sigdelset(&fullsigmask, SIGSYS);
}

/*
 * Callback function to allow a threads implementation to
 * register its own locking primitives if the default
 * ones are not suitable.
 * The calling context should be the only context
 * executing at invocation time.
 */
void
_rtld_thread_init(struct RtldLockInfo *pli)
{
	int flags, i;
	void *locks[RTLD_LOCK_CNT];

	/* disable all locking while this function is running */
	flags = thread_mask_set(~0);

	if (pli == NULL)
		pli = &deflockinfo;

	for (i = 0; i < RTLD_LOCK_CNT; i++)
		if ((locks[i] = pli->lock_create()) == NULL)
			break;

	if (i < RTLD_LOCK_CNT) {
		while (--i >= 0)
			pli->lock_destroy(locks[i]);
		abort();
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		if (rtld_locks[i].handle == NULL)
			continue;
		if (flags & rtld_locks[i].mask)
			lockinfo.lock_release(rtld_locks[i].handle);
		lockinfo.lock_destroy(rtld_locks[i].handle);
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].handle = locks[i];
		if (flags & rtld_locks[i].mask)
			pli->wlock_acquire(rtld_locks[i].handle);
	}

	lockinfo.lock_create = pli->lock_create;
	lockinfo.lock_destroy = pli->lock_destroy;
	lockinfo.rlock_acquire = pli->rlock_acquire;
	lockinfo.wlock_acquire = pli->wlock_acquire;
	lockinfo.lock_release = pli->lock_release;
	lockinfo.thread_set_flag = pli->thread_set_flag;
	lockinfo.thread_clr_flag = pli->thread_clr_flag;
	lockinfo.at_fork = pli->at_fork;

	/* restore thread locking state, this time with new locks */
	thread_mask_clear(~0);
	thread_mask_set(flags);
	dbg("_rtld_thread_init: done");
}

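/*
 * Registration sketch (hypothetical function names, shown only to
 * illustrate the callback contract): a threads library fills in an
 * RtldLockInfo with its own primitives and hands it over once, while
 * it is still the only context executing.
 *
 *	static struct RtldLockInfo li = {
 *		.rtli_version = RTLI_VERSION,
 *		.lock_create = thr_lock_create,
 *		.lock_destroy = thr_lock_destroy,
 *		.rlock_acquire = thr_rlock_acquire,
 *		.wlock_acquire = thr_wlock_acquire,
 *		.lock_release = thr_lock_release,
 *		.thread_set_flag = thr_set_flag,
 *		.thread_clr_flag = thr_clr_flag,
 *		.at_fork = NULL,
 *	};
 *
 *	_rtld_thread_init(&li);
 */
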
void
_rtld_atfork_pre(int *locks)
{
	RtldLockState ls[2];

	if (locks == NULL)
		return;

	/*
	 * Warning: this did not work well with the rtld compat
	 * locks above: the thread signal mask was corrupted (set
	 * to all signals blocked) if two locks were taken
	 * simultaneously in write mode.  The caller of
	 * _rtld_atfork_pre() must provide a working implementation
	 * of the locks anyway, and libthr locks are fine.
	 */
	wlock_acquire(rtld_phdr_lock, &ls[0]);
	wlock_acquire(rtld_bind_lock, &ls[1]);

	/* XXXKIB: I am really sorry for this. */
	locks[0] = ls[1].lockstate;
	locks[2] = ls[0].lockstate;
}

void
_rtld_atfork_post(int *locks)
{
	RtldLockState ls[2];

	if (locks == NULL)
		return;

	bzero(ls, sizeof(ls));
	ls[0].lockstate = locks[2];
	ls[1].lockstate = locks[0];
	lock_release(rtld_bind_lock, &ls[1]);
	lock_release(rtld_phdr_lock, &ls[0]);
}
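
/*
 * Fork sketch (hypothetical wrapper, for illustration): the threads
 * library brackets fork() with these hooks so the child inherits
 * rtld's locks in a consistent state.  The array layout must match
 * what _rtld_atfork_pre() stores (indices 0 and 2 above).
 *
 *	int rtld_locks[8];	// any size >= 3 works here
 *	pid_t pid;
 *
 *	_rtld_atfork_pre(rtld_locks);
 *	pid = fork();
 *	_rtld_atfork_post(rtld_locks);
 */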