1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "apr_arch_atomic.h"
18 #include "apr_thread_mutex.h"
20 #if defined(USE_ATOMICS_GENERIC) || defined (NEED_ATOMICS_GENERIC64)
25 # define DECLARE_MUTEX_LOCKED(name, mem) \
26 apr_thread_mutex_t *name = mutex_hash(mem)
27 # define MUTEX_UNLOCK(name) \
29 if (apr_thread_mutex_unlock(name) != APR_SUCCESS) \
33 # define DECLARE_MUTEX_LOCKED(name, mem)
34 # define MUTEX_UNLOCK(name)
35 # warning Be warned: using stubs for all atomic operations
40 static apr_thread_mutex_t **hash_mutex;
42 #define NUM_ATOMIC_HASH 7
43 /* shift by 2 to get rid of alignment issues */
44 #define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
46 static apr_status_t atomic_cleanup(void *data)
48 if (hash_mutex == data)
54 apr_status_t apr__atomic_generic64_init(apr_pool_t *p)
59 if (hash_mutex != NULL)
62 hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
63 apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
64 apr_pool_cleanup_null);
66 for (i = 0; i < NUM_ATOMIC_HASH; i++) {
67 rv = apr_thread_mutex_create(&(hash_mutex[i]),
68 APR_THREAD_MUTEX_DEFAULT, p);
69 if (rv != APR_SUCCESS) {
77 static APR_INLINE apr_thread_mutex_t *mutex_hash(volatile apr_uint64_t *mem)
79 apr_thread_mutex_t *mutex = hash_mutex[ATOMIC_HASH(mem)];
81 if (apr_thread_mutex_lock(mutex) != APR_SUCCESS) {
90 apr_status_t apr__atomic_generic64_init(apr_pool_t *p)
95 #endif /* APR_HAS_THREADS */
97 APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem)
102 APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val)
104 DECLARE_MUTEX_LOCKED(mutex, mem);
111 APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val)
113 apr_uint64_t old_value;
114 DECLARE_MUTEX_LOCKED(mutex, mem);
124 APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val)
126 DECLARE_MUTEX_LOCKED(mutex, mem);
131 APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem)
133 return apr_atomic_add64(mem, 1);
136 APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem)
139 DECLARE_MUTEX_LOCKED(mutex, mem);
149 APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t with,
153 DECLARE_MUTEX_LOCKED(mutex, mem);
165 APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val)
168 DECLARE_MUTEX_LOCKED(mutex, mem);
178 #endif /* USE_ATOMICS_GENERIC64 */