2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * 6.1 : Mutual Exclusion and Synchronisation
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <contrib/dev/acpica/acpi.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/sysctl.h>
42 #include <sys/mutex.h>
44 #define _COMPONENT ACPI_OS_SERVICES
45 ACPI_MODULE_NAME("SYNCH")
47 MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
49 #define AS_LOCK(as) mtx_lock(&(as)->as_mtx)
50 #define AS_UNLOCK(as) mtx_unlock(&(as)->as_mtx)
53 * Simple counting semaphore implemented using a mutex. (Subsequently used
54 * in the OSI code to implement a mutex. Go figure.)
56 struct acpi_semaphore {
65 /* Default number of maximum pending threads. */
66 #ifndef ACPI_NO_SEMAPHORES
67 #ifndef ACPI_SEMAPHORES_MAX_PENDING
68 #define ACPI_SEMAPHORES_MAX_PENDING 4
71 static int acpi_semaphore_debug = 0;
72 TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
73 SYSCTL_DECL(_debug_acpi);
74 SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
75 &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
76 #endif /* !ACPI_NO_SEMAPHORES */
79 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
80 ACPI_SEMAPHORE *OutHandle)
82 #ifndef ACPI_NO_SEMAPHORES
83 struct acpi_semaphore *as;
85 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
87 if (OutHandle == NULL)
88 return_ACPI_STATUS (AE_BAD_PARAMETER);
89 if (InitialUnits > MaxUnits)
90 return_ACPI_STATUS (AE_BAD_PARAMETER);
92 if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
93 return_ACPI_STATUS (AE_NO_MEMORY);
95 mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
96 as->as_units = InitialUnits;
97 as->as_maxunits = MaxUnits;
98 as->as_pendings = as->as_resetting = as->as_timeouts = 0;
100 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
101 "created semaphore %p max %d, initial %d\n",
102 as, InitialUnits, MaxUnits));
104 *OutHandle = (ACPI_HANDLE)as;
106 *OutHandle = (ACPI_HANDLE)OutHandle;
107 #endif /* !ACPI_NO_SEMAPHORES */
109 return_ACPI_STATUS (AE_OK);
113 AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
115 #ifndef ACPI_NO_SEMAPHORES
116 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
118 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
120 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
121 mtx_destroy(&as->as_mtx);
122 free(Handle, M_ACPISEM);
123 #endif /* !ACPI_NO_SEMAPHORES */
125 return_ACPI_STATUS (AE_OK);
129 * This implementation has a bug, in that it has to stall for the entire
130 * timeout before it will return AE_TIME. A better implementation would
131 * use getmicrotime() to correctly adjust the timeout after being woken up.
134 AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
136 #ifndef ACPI_NO_SEMAPHORES
138 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
140 struct timeval timeouttv, currenttv, timelefttv;
142 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
145 return_ACPI_STATUS (AE_BAD_PARAMETER);
148 return_ACPI_STATUS (AE_OK);
151 if (as->as_units < Units && as->as_timeouts > 10) {
152 printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
154 as->as_units = as->as_maxunits;
156 as->as_resetting = 1;
160 return_ACPI_STATUS (AE_TIME);
163 if (as->as_resetting)
164 return_ACPI_STATUS (AE_TIME);
167 /* a timeout of ACPI_WAIT_FOREVER means "forever" */
168 if (Timeout == ACPI_WAIT_FOREVER) {
170 timeouttv.tv_sec = ((0xffff/1000) + 1); /* cf. ACPI spec */
171 timeouttv.tv_usec = 0;
173 /* compute timeout using microseconds per tick */
174 tmo = (Timeout * 1000) / (1000000 / hz);
177 timeouttv.tv_sec = Timeout / 1000;
178 timeouttv.tv_usec = (Timeout % 1000) * 1000;
181 /* calculate timeout value in timeval */
182 getmicrotime(¤ttv);
183 timevaladd(&timeouttv, ¤ttv);
186 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
187 "get %d units from semaphore %p (has %d), timeout %d\n",
188 Units, as, as->as_units, Timeout));
190 if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
194 if (as->as_units >= Units) {
195 as->as_units -= Units;
200 /* limit number of pending threads */
201 if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
206 /* if timeout values of zero is specified, return immediately */
212 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
213 "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
214 as, &as->as_mtx, PCATCH, tmo));
218 if (acpi_semaphore_debug) {
219 printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
220 __func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
223 rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);
228 if (as->as_resetting) {
229 /* semaphore reset, return immediately */
230 if (as->as_pendings == 0) {
231 as->as_resetting = 0;
238 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
239 if (rv == EWOULDBLOCK) {
244 /* check if we already awaited enough */
245 timelefttv = timeouttv;
246 getmicrotime(¤ttv);
247 timevalsub(&timelefttv, ¤ttv);
248 if (timelefttv.tv_sec < 0) {
249 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
255 /* adjust timeout for the next sleep */
256 tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
261 if (acpi_semaphore_debug) {
262 printf("%s: Wakeup timeleft(%jd, %lu), tmo %u, sem %p, thread %d\n",
263 __func__, (intmax_t)timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
264 AcpiOsGetThreadId());
268 if (acpi_semaphore_debug) {
269 if (result == AE_TIME && Timeout > 0) {
270 printf("%s: Timeout %d, pending %d, semaphore %p\n",
271 __func__, Timeout, as->as_pendings, as);
273 if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
274 printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
275 __func__, Units, as->as_units, as->as_pendings, as,
276 AcpiOsGetThreadId());
280 if (result == AE_TIME)
286 return_ACPI_STATUS (result);
288 return_ACPI_STATUS (AE_OK);
289 #endif /* !ACPI_NO_SEMAPHORES */
293 AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
295 #ifndef ACPI_NO_SEMAPHORES
296 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
298 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
301 return_ACPI_STATUS(AE_BAD_PARAMETER);
304 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
305 "return %d units to semaphore %p (has %d)\n",
306 Units, as, as->as_units));
307 if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
308 as->as_units += Units;
309 if (as->as_units > as->as_maxunits)
310 as->as_units = as->as_maxunits;
313 if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
314 printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
315 __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
320 #endif /* !ACPI_NO_SEMAPHORES */
322 return_ACPI_STATUS (AE_OK);
325 /* Combined mutex + mutex name storage since the latter must persist. */
326 struct acpi_spinlock {
332 AcpiOsCreateLock (ACPI_SPINLOCK *OutHandle)
334 struct acpi_spinlock *h;
336 if (OutHandle == NULL)
337 return (AE_BAD_PARAMETER);
338 h = malloc(sizeof(*h), M_ACPISEM, M_NOWAIT | M_ZERO);
340 return (AE_NO_MEMORY);
342 /* Build a unique name based on the address of the handle. */
343 if (OutHandle == &AcpiGbl_GpeLock)
344 snprintf(h->name, sizeof(h->name), "acpi subsystem GPE lock");
345 else if (OutHandle == &AcpiGbl_HardwareLock)
346 snprintf(h->name, sizeof(h->name), "acpi subsystem HW lock");
348 snprintf(h->name, sizeof(h->name), "acpi subsys %p", OutHandle);
349 mtx_init(&h->lock, h->name, NULL, MTX_DEF|MTX_RECURSE);
350 *OutHandle = (ACPI_SPINLOCK)h;
355 AcpiOsDeleteLock (ACPI_SPINLOCK Handle)
357 struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
361 mtx_destroy(&h->lock);
366 * The Flags parameter seems to state whether or not caller is an ISR
367 * (and thus can't block) but since we have ithreads, we don't worry
368 * about potentially blocking.
371 AcpiOsAcquireLock (ACPI_SPINLOCK Handle)
373 struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
382 AcpiOsReleaseLock (ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
384 struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
388 mtx_unlock(&h->lock);
/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
399 * Acquire the global lock. If busy, set the pending bit. The caller
400 * will wait for notification from the BIOS that the lock is available
401 * and then attempt to acquire it again.
404 acpi_acquire_global_lock(uint32_t *lock)
410 new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
411 ((old >> 1) & GL_BIT_PENDING);
412 } while (atomic_cmpset_acq_int(lock, old, new) == 0);
414 return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
418 * Release the global lock, returning whether there is a waiter pending.
419 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
423 acpi_release_global_lock(uint32_t *lock)
429 new = old & ~GL_BIT_MASK;
430 } while (atomic_cmpset_rel_int(lock, old, new) == 0);
432 return (old & GL_BIT_PENDING);