2 * svn_named_atomic.c: routines for machine-wide named atomics.
4 * ====================================================================
5 * Licensed to the Apache Software Foundation (ASF) under one
6 * or more contributor license agreements. See the NOTICE file
7 * distributed with this work for additional information
8 * regarding copyright ownership. The ASF licenses this file
9 * to you under the Apache License, Version 2.0 (the
10 * "License"); you may not use this file except in compliance
11 * with the License. You may obtain a copy of the License at
13 * http://www.apache.org/licenses/LICENSE-2.0
15 * Unless required by applicable law or agreed to in writing,
16 * software distributed under the License is distributed on an
17 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
18 * KIND, either express or implied. See the License for the
19 * specific language governing permissions and limitations
21 * ====================================================================
24 #include "private/svn_named_atomic.h"
26 #include <apr_global_mutex.h>
29 #include "svn_private_config.h"
30 #include "private/svn_atomic.h"
31 #include "private/svn_mutex.h"
32 #include "svn_pools.h"
33 #include "svn_dirent_uri.h"
36 /* Implementation aspects.
38 * We use a single shared memory block (memory mapped file) that will be
39 * created by the first user and merely mapped by all subsequent ones.
40 * The memory block contains an short header followed by a fixed-capacity
41 * array of named atomics. The number of entries currently in use is stored
44 * Finding / creating the MMAP object as well as adding new array entries
45 * is being guarded by an APR global mutex. Since releasing the MMAP
46 * structure and closing the underlying file does not affect other users of the
47 * same, cleanup will not be synchronized.
49 * The array is append-only. Once a process mapped the block into its
50 * address space, it may freely access any of the used entries. However,
51 * it must synchronize access to the volatile data within the entries.
52 * On Windows and where otherwise supported by GCC, lightweight "lock-free"
53 * synchronization will be used. Other targets serialize all access using
56 * Atomics will be identified by their name (a short string) and lookup
57 * takes linear time. But even that takes only about 10 microseconds for a
58 * full array scan -- which is in the same order of magnitude as e.g. a
59 * single global mutex lock / unlock pair.
/* NOTE(review): this dump has the original file's line numbers fused into
 * every line and intermediate lines dropped (the fused numbers jump, e.g.
 * 63 -> 65), so nothing in this file compiles as-is.  Restore from the
 * upstream source before building. */
62 /* Capacity of our shared memory object, i.e. max number of named atomics
63 * that may be created. Should have the form 2**N - 1.
65 #define MAX_ATOMIC_COUNT 1023
67 /* We choose the size of a single named atomic object to fill a complete
68 * cache line (on most architectures). Thereby, we minimize the cache
69 * sync. overhead between different CPU cores.
71 #define CACHE_LINE_LENGTH 64
73 /* We need 8 bytes for the actual value and the remainder is used to
74 * store the NUL-terminated name.
76 * Must not be smaller than SVN_NAMED_ATOMIC__MAX_NAME_LENGTH.
78 #define MAX_NAME_LENGTH (CACHE_LINE_LENGTH - sizeof(apr_int64_t) - 1)
80 /* Particle that will be appended to the namespace name to form the
81 * name of the mutex / lock file used for that namespace.
83 #define MUTEX_NAME_SUFFIX ".mutex"
85 /* Particle that will be appended to the namespace name to form the
86 * name of the shared memory file that backs that namespace.
88 #define SHM_NAME_SUFFIX ".shm"
90 /* Platform-dependent implementations of our basic atomic operations.
91 * NA_SYNCHRONIZE(op) will ensure that the OP gets executed atomically.
92 * This will be zero-overhead if OP itself is already atomic.
94 * (We don't call it SYNCHRONIZE because Windows has a preprocess macro by
97 * The default implementation will use the same mutex for initialization
98 * as well as any type of data access. This is quite expensive and we
99 * can do much better on most platforms.
/* Branch 1: Win32 with the 64-bit Interlocked* API available -- lock-free. */
101 #if defined(WIN32) && ((_WIN32_WINNT >= 0x0502) || defined(InterlockedExchangeAdd64))
103 /* Interlocked API / intrinsics guarantee full data synchronization
105 #define synched_read(mem) *mem
106 #define synched_write(mem, value) InterlockedExchange64(mem, value)
107 #define synched_add(mem, delta) InterlockedExchangeAdd64(mem, delta)
108 #define synched_cmpxchg(mem, value, comperand) \
109 InterlockedCompareExchange64(mem, value, comperand)
/* The ops above are intrinsically atomic, so NA_SYNCHRONIZE needs no mutex
 * here and just executes OP directly. */
111 #define NA_SYNCHRONIZE(_atomic,op) op;
112 #define NA_SYNCHRONIZE_IS_FAST TRUE
/* Branch 2: GCC-style __sync_* builtins -- also lock-free.  Note the
 * argument order: our (value, comperand) maps onto
 * __sync_val_compare_and_swap(mem, comperand, value). */
114 #elif SVN_HAS_ATOMIC_BUILTINS
116 /* GCC provides atomic intrinsics for most common CPU types
118 #define synched_read(mem) *mem
119 #define synched_write(mem, value) __sync_lock_test_and_set(mem, value)
120 #define synched_add(mem, delta) __sync_add_and_fetch(mem, delta)
121 #define synched_cmpxchg(mem, value, comperand) \
122 __sync_val_compare_and_swap(mem, comperand, value)
124 #define NA_SYNCHRONIZE(_atomic,op) op;
125 #define NA_SYNCHRONIZE_IS_FAST TRUE
129 /* Default implementation
/* Fallback branch for platforms without lock-free primitives: the
 * synched_* helpers below are plain (non-atomic) reads/writes, and
 * NA_SYNCHRONIZE wraps OP in the namespace lock()/unlock() pair instead.
 * NOTE(review): the function bodies are truncated in this dump (fused line
 * numbers jump 132 -> 138, 140 -> 147, etc.). */
132 synched_read(volatile apr_int64_t *mem)
138 synched_write(volatile apr_int64_t *mem, apr_int64_t value)
140 apr_int64_t old_value = *mem;
147 synched_add(volatile apr_int64_t *mem, apr_int64_t delta)
/* Not atomic by itself; made safe only under NA_SYNCHRONIZE's mutex. */
149 return *mem += delta;
153 synched_cmpxchg(volatile apr_int64_t *mem,
155 apr_int64_t comperand)
157 apr_int64_t old_value = *mem;
158 if (old_value == comperand)
/* Serialize every access through the atomic's mutex (see lock()/unlock()
 * below); correspondingly NA_SYNCHRONIZE_IS_FAST is FALSE here. */
164 #define NA_SYNCHRONIZE(_atomic,op)\
166 SVN_ERR(lock(_atomic->mutex));\
168 SVN_ERR(unlock(_atomic->mutex,SVN_NO_ERROR));\
171 #define NA_SYNCHRONIZE_IS_FAST FALSE
175 /* Structure describing a single atomic: its VALUE and NAME.
/* One entry per named atomic; sized (together with its padding, see
 * MAX_NAME_LENGTH) to occupy one CACHE_LINE_LENGTH-byte cache line. */
177 struct named_atomic_data_t
179 volatile apr_int64_t value;
180 char name[MAX_NAME_LENGTH + 1];
183 /* Content of our shared memory buffer. COUNT is the number
184 * of used entries in ATOMICS. Insertion is append-only.
185 * PADDING is used to align the header information with the
186 * atomics to create a favorable data alignment.
/* Header (COUNT + PADDING) is padded up to exactly one entry's size so
 * that the ATOMICS array starts cache-line aligned. */
190 volatile apr_uint32_t count;
191 char padding [sizeof(struct named_atomic_data_t) - sizeof(apr_uint32_t)];
193 struct named_atomic_data_t atomics[MAX_ATOMIC_COUNT];
196 /* Structure combining all objects that we need for access serialization.
200 /* Inter-process sync. is handled through the lock file. */
201 apr_file_t *lock_file;
203 /* Pool to be used with lock / unlock functions */
207 /* API structure combining the atomic data and the access mutex
209 struct svn_named_atomic__t
211 /* pointer into the shared memory */
212 struct named_atomic_data_t *data;
214 /* sync. object; never NULL (even if unused) */
215 struct mutex_t *mutex;
218 /* This is intended to be a singleton struct. It contains all
219 * information necessary to initialize and access the shared
222 struct svn_atomic_namespace__t
224 /* Pointer to the shared data mapped into our process */
225 struct shared_data_t *data;
227 /* Last time we checked, this was the number of used
228 * (i.e. fully initialized) items. I.e. we can read
229 * their names without further sync. */
230 volatile svn_atomic_t min_used;
232 /* for each atomic in the shared memory, we hand out
233 * at most one API-level object. */
234 struct svn_named_atomic__t atomics[MAX_ATOMIC_COUNT];
236 /* Synchronization object for this namespace */
237 struct mutex_t mutex;
240 /* On most operating systems APR implements file locks per process, not
241 * per file. I.e. the lock file will only sync. among processes but within
242 * a process, we must use a mutex to sync the threads. */
243 /* Compare ../libsvn_fs_fs/fs.h:SVN_FS_FS__USE_LOCK_MUTEX */
/* NOTE(review): the #else between original lines 245 and 247 is missing
 * from this dump. */
244 #if APR_HAS_THREADS && !defined(WIN32)
245 #define USE_THREAD_MUTEX 1
247 #define USE_THREAD_MUTEX 0
/* Process-local mutex shared by all namespaces; lazily created by
 * init_thread_mutex() below via svn_atomic__init_once. */
250 /* Used for process-local thread sync.
252 static svn_mutex__t *thread_mutex = NULL;
255 /* Initialization flag for the above used by svn_atomic__init_once.
257 static volatile svn_atomic_t mutex_initialized = FALSE;
259 /* Initialize the thread sync. structures.
260 * To be called by svn_atomic__init_once.
263 init_thread_mutex(void *baton, apr_pool_t *pool)
265 /* let the mutex live as long as the APR */
/* Parentless pool: the mutex must outlive any caller-supplied pool. */
266 apr_pool_t *global_pool = svn_pool_create(NULL);
268 return svn_mutex__init(&thread_mutex, USE_THREAD_MUTEX, global_pool);
270 #endif /* APR_HAS_MMAP */
272 /* Utility that acquires our global mutex and converts error types.
275 lock(struct mutex_t *mutex)
/* Two-level locking: first serialize threads within this process via the
 * global thread_mutex, then serialize against other processes by locking
 * the namespace's lock file. */
279 /* Get lock on the filehandle. */
280 SVN_ERR(svn_mutex__lock(thread_mutex));
281 err = svn_io_lock_open_file(mutex->lock_file, TRUE, FALSE, mutex->pool);
/* If the file lock failed, release the thread mutex again and compose the
 * errors (NOTE(review): surrounding lines are missing from this dump). */
284 ? svn_mutex__unlock(thread_mutex, err)
288 /* Utility that releases the lock previously acquired via lock(). If the
289 * unlock succeeds and OUTER_ERR is not NULL, OUTER_ERR will be returned.
290 * Otherwise, return the result of the unlock operation.
293 unlock(struct mutex_t *mutex, svn_error_t * outer_err)
/* Release in reverse order of lock(): file lock first, thread mutex last,
 * composing OUTER_ERR with any unlock error. */
295 svn_error_t *unlock_err
296 = svn_io_unlock_open_file(mutex->lock_file, mutex->pool);
297 return svn_mutex__unlock(thread_mutex,
298 svn_error_compose_create(outer_err,
303 /* The last user to close a particular namespace should also remove the
304 * lock file. Failure to do so, however, does not affect further uses
305 * of the same namespace.
/* Pool cleanup handler (registered in svn_atomic_namespace__create). */
308 delete_lock_file(void *arg)
310 struct mutex_t *mutex = arg;
311 const char *lock_name = NULL;
313 /* locks have already been cleaned up. Simply close the file */
314 apr_status_t status = apr_file_close(mutex->lock_file);
316 /* Remove the file from disk. This will fail if there are still other
317 * users of this lock file, i.e. namespace. */
/* Recover the file's name from the (now closed) handle, then remove it;
 * the removal result is deliberately best-effort. */
318 apr_file_name_get(&lock_name, mutex->lock_file);
320 apr_file_remove(lock_name, mutex->pool);
324 #endif /* APR_HAS_MMAP */
326 /* Validate the ATOMIC parameter, i.e. its address. Correct code will
327 * never need this but if someone should accidentally use a NULL or
328 * incomplete structure, let's catch that here instead of segfaulting.
331 validate(svn_named_atomic__t *atomic)
/* Returns SVN_ERR_BAD_ATOMIC unless ATOMIC, its data pointer and its
 * mutex pointer are all non-NULL. */
333 return atomic && atomic->data && atomic->mutex
335 : svn_error_create(SVN_ERR_BAD_ATOMIC, 0, _("Not a valid atomic"));
338 /* Auto-initialize and return in *ATOMIC the API-level object for the
339 * atomic with index I within NS. */
341 return_atomic(svn_named_atomic__t **atomic,
342 svn_atomic_namespace__t *ns,
345 *atomic = &ns->atomics[i];
/* First hand-out for this slot: wire the wrapper to the namespace mutex
 * and to entry I of the shared memory array. */
346 if (ns->atomics[i].data == NULL)
348 (*atomic)->mutex = &ns->mutex;
349 (*atomic)->data = &ns->data->atomics[i];
356 svn_named_atomic__is_supported(void)
360 #elif !defined(_WIN32)
/* Windows-only probe, cached across calls: try once to create a global
 * shared-memory section; success means APR's SHM implementation can
 * create the global objects it needs. */
363 static svn_tristate_t result = svn_tristate_unknown;
365 if (result == svn_tristate_unknown)
367 /* APR SHM implementation requires the creation of global objects */
368 HANDLE handle = CreateFileMappingA(INVALID_HANDLE_VALUE,
373 "Global\\__RandomXZY_svn");
377 result = svn_tristate_true;
380 result = svn_tristate_false;
383 return result == svn_tristate_true;
/* TRUE iff the lock-free NA_SYNCHRONIZE implementation is in use
 * (see NA_SYNCHRONIZE_IS_FAST above). */
388 svn_named_atomic__is_efficient(void)
390 return NA_SYNCHRONIZE_IS_FAST;
/* Create / open the namespace NAME: open (creating if necessary) its lock
 * file and shared-memory file, map the latter into this process, and
 * return the initialized namespace in *NS allocated in RESULT_POOL.
 * NOTE(review): many lines of this function are missing from this dump
 * (fused line numbers jump throughout); do not treat the fragment as the
 * complete control flow. */
394 svn_atomic_namespace__create(svn_atomic_namespace__t **ns,
396 apr_pool_t *result_pool)
/* Without APR_HAS_MMAP support the feature is unimplemented. */
399 return svn_error_create(APR_ENOTIMPL, NULL, NULL);
401 apr_status_t apr_err;
405 const char *shm_name, *lock_name;
408 apr_pool_t *subpool = svn_pool_create(result_pool);
410 /* allocate the namespace data structure
412 svn_atomic_namespace__t *new_ns = apr_pcalloc(result_pool, sizeof(**ns));
414 /* construct the names of the system objects that we need
416 shm_name = apr_pstrcat(subpool, name, SHM_NAME_SUFFIX, NULL);
417 lock_name = apr_pstrcat(subpool, name, MUTEX_NAME_SUFFIX, NULL);
419 /* initialize the lock objects
421 SVN_ERR(svn_atomic__init_once(&mutex_initialized, init_thread_mutex, NULL,
424 new_ns->mutex.pool = result_pool;
425 SVN_ERR(svn_io_file_open(&new_ns->mutex.lock_file, lock_name,
426 APR_READ | APR_WRITE | APR_CREATE,
430 /* Make sure the last user of our lock file will actually remove it.
431 * Please note that only the last file handle being closed will actually
432 * remove the underlying file (see docstring for apr_file_remove).
434 apr_pool_cleanup_register(result_pool, &new_ns->mutex,
436 apr_pool_cleanup_null);
438 /* Prevent concurrent initialization.
440 SVN_ERR(lock(&new_ns->mutex));
442 /* First, make sure that the underlying file exists. If it doesn't
443 * exist, create one and initialize its content.
445 err = svn_io_file_open(&file, shm_name,
446 APR_READ | APR_WRITE | APR_CREATE,
451 err = svn_io_stat(&finfo, shm_name, APR_FINFO_SIZE, subpool);
/* A short file means it was just created (or truncated): write a fully
 * zeroed shared_data_t so COUNT starts at 0. */
452 if (!err && finfo.size < sizeof(struct shared_data_t))
454 /* Zero all counters, values and names.
456 struct shared_data_t initial_data;
457 memset(&initial_data, 0, sizeof(initial_data));
458 err = svn_io_file_write_full(file, &initial_data,
459 sizeof(initial_data), NULL,
464 /* Now, map it into memory.
468 apr_err = apr_mmap_create(&mmap, file, 0, sizeof(*new_ns->data),
469 APR_MMAP_READ | APR_MMAP_WRITE , result_pool);
471 new_ns->data = mmap->mm;
473 err = svn_error_createf(apr_err, NULL,
474 _("MMAP failed for file '%s'"), shm_name);
477 svn_pool_destroy(subpool);
479 if (!err && new_ns->data)
481 /* Detect severe cases of corruption (i.e. when some outsider messed
482 * with our data file)
484 if (new_ns->data->count > MAX_ATOMIC_COUNT)
485 return svn_error_create(SVN_ERR_CORRUPTED_ATOMIC_STORAGE, 0,
486 _("Number of atomics in namespace is too large."));
488 /* Cache the number of existing, complete entries. There can't be
489 * incomplete ones from other processes because we hold the mutex.
490 * Our process will also not access this information since we are
491 * either being called from within svn_atomic__init_once or by
492 * svn_atomic_namespace__create for a new object.
494 new_ns->min_used = new_ns->data->count;
498 /* Unlock to allow other processes to access the shared memory as well.
500 return unlock(&new_ns->mutex, err);
501 #endif /* APR_HAS_MMAP */
/* Remove the on-disk files (shared memory + lock file) backing the
 * namespace NAME.  Missing files are ignored (ignore_enoent = TRUE). */
505 svn_atomic_namespace__cleanup(const char *name,
508 const char *shm_name, *lock_name;
510 /* file names used for the specified namespace */
511 shm_name = apr_pstrcat(pool, name, SHM_NAME_SUFFIX, NULL);
512 lock_name = apr_pstrcat(pool, name, MUTEX_NAME_SUFFIX, NULL);
514 /* remove these files if they exist */
515 SVN_ERR(svn_io_remove_file2(shm_name, TRUE, pool));
516 SVN_ERR(svn_io_remove_file2(lock_name, TRUE, pool));
/* Look up (and, if AUTO_CREATE, append) the atomic named NAME in NS and
 * return its API wrapper in *ATOMIC.  Strategy: lock-free scan of the
 * already-published entries first, then a locked scan + insert.
 * NOTE(review): several lines are missing from this dump; the fragment
 * below is not the complete function body. */
522 svn_named_atomic__get(svn_named_atomic__t **atomic,
523 svn_atomic_namespace__t *ns,
525 svn_boolean_t auto_create)
527 apr_uint32_t i, count;
528 svn_error_t *error = SVN_NO_ERROR;
529 apr_size_t len = strlen(name);
531 /* Check parameters and make sure we return a NULL atomic
532 * in case of failure.
535 if (len > SVN_NAMED_ATOMIC__MAX_NAME_LENGTH)
536 return svn_error_create(SVN_ERR_BAD_ATOMIC, 0,
537 _("Atomic's name is too long."));
539 /* If no namespace has been provided, bail out.
541 if (ns == NULL || ns->data == NULL)
542 return svn_error_create(SVN_ERR_BAD_ATOMIC, 0,
543 _("Namespace has not been initialized."));
545 /* Optimistic lookup.
546 * Because we never change the name of existing atomics and may only
547 * append new ones, we can safely compare the name of existing ones
548 * with the name that we are looking for.
/* len + 1 so the terminating NUL must match too, i.e. exact-name match. */
550 for (i = 0, count = svn_atomic_read(&ns->min_used); i < count; ++i)
551 if (strncmp(ns->data->atomics[i].name, name, len + 1) == 0)
553 return_atomic(atomic, ns, i);
558 * Serialize all lookup and insert the item, if necessary and allowed.
560 SVN_ERR(lock(&ns->mutex));
562 /* We only need to check for new entries.
/* Re-scan only entries published since our optimistic pass. */
564 for (i = count; i < ns->data->count; ++i)
565 if (strncmp(ns->data->atomics[i].name, name, len + 1) == 0)
567 return_atomic(atomic, ns, i);
569 /* Update our cached number of complete entries. */
570 svn_atomic_set(&ns->min_used, ns->data->count);
572 return unlock(&ns->mutex, error);
575 /* Not found. Append a new entry, if allowed & possible.
579 if (ns->data->count < MAX_ATOMIC_COUNT)
581 ns->data->atomics[ns->data->count].value = 0;
582 memcpy(ns->data->atomics[ns->data->count].name,
586 return_atomic(atomic, ns, ns->data->count);
590 error = svn_error_create(SVN_ERR_BAD_ATOMIC, 0,
591 _("Out of slots for named atomic."));
594 /* We are mainly done here. Let others continue their work.
596 SVN_ERR(unlock(&ns->mutex, error));
598 /* Only now can we be sure that a full memory barrier has been set
599 * and that the new entry has been written to memory in full.
601 svn_atomic_set(&ns->min_used, ns->data->count);
/* Atomically read ATOMIC's current value into *VALUE.
 * Returns SVN_ERR_BAD_ATOMIC if ATOMIC is invalid (see validate()). */
607 svn_named_atomic__read(apr_int64_t *value,
608 svn_named_atomic__t *atomic)
610 SVN_ERR(validate(atomic));
611 NA_SYNCHRONIZE(atomic, *value = synched_read(&atomic->data->value));
/* Atomically set ATOMIC to NEW_VALUE; the previous value is captured in a
 * temporary (presumably copied to *OLD_VALUE on the lines missing from
 * this dump -- TODO confirm against upstream). */
617 svn_named_atomic__write(apr_int64_t *old_value,
618 apr_int64_t new_value,
619 svn_named_atomic__t *atomic)
623 SVN_ERR(validate(atomic));
624 NA_SYNCHRONIZE(atomic, temp = synched_write(&atomic->data->value, new_value));
/* Atomically add DELTA to ATOMIC; the resulting value is captured in a
 * temporary (presumably copied to *NEW_VALUE on the lines missing from
 * this dump -- TODO confirm against upstream). */
633 svn_named_atomic__add(apr_int64_t *new_value,
635 svn_named_atomic__t *atomic)
639 SVN_ERR(validate(atomic));
640 NA_SYNCHRONIZE(atomic, temp = synched_add(&atomic->data->value, delta));
649 svn_named_atomic__cmpxchg(apr_int64_t *old_value,
650 apr_int64_t new_value,
651 apr_int64_t comperand,
652 svn_named_atomic__t *atomic)
656 SVN_ERR(validate(atomic));
657 NA_SYNCHRONIZE(atomic, temp = synched_cmpxchg(&atomic->data->value,