/*
 * cache-inprocess.c: in-memory caching for Subversion
 *
 * ====================================================================
 *    Licensed to the Apache Software Foundation (ASF) under one
 *    or more contributor license agreements.  See the NOTICE file
 *    distributed with this work for additional information
 *    regarding copyright ownership.  The ASF licenses this file
 *    to you under the Apache License, Version 2.0 (the
 *    "License"); you may not use this file except in compliance
 *    with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing,
 *    software distributed under the License is distributed on an
 *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *    KIND, either express or implied.  See the License for the
 *    specific language governing permissions and limitations
 *    under the License.
 * ====================================================================
 */
#include <apr_thread_mutex.h>

#include "svn_cache.h"
#include "svn_pools.h"

#include "svn_private_config.h"
#include "cache.h"

#include "private/svn_mutex.h"
35 /* The (internal) cache object. */
36 typedef struct inprocess_cache_t {
37 /* A user-defined identifier for this cache instance. */
40 /* HASH maps a key (of size KLEN) to a struct cache_entry. */
44 /* Used to copy values into the cache. */
45 svn_cache__serialize_func_t serialize_func;
47 /* Used to copy values out of the cache. */
48 svn_cache__deserialize_func_t deserialize_func;
50 /* Maximum number of pages that this cache instance may allocate */
51 apr_uint64_t total_pages;
52 /* The number of pages we're allowed to allocate before having to
53 * try to reuse one. */
54 apr_uint64_t unallocated_pages;
55 /* Number of cache entries stored on each page. Must be at least 1. */
56 apr_uint64_t items_per_page;
58 /* A dummy cache_page serving as the head of a circular doubly
59 * linked list of cache_pages. SENTINEL->NEXT is the most recently
60 * used page, and SENTINEL->PREV is the least recently used page.
61 * All pages in this list are "full"; the page currently being
62 * filled (PARTIAL_PAGE) is not in the list. */
63 struct cache_page *sentinel;
65 /* A page currently being filled with entries, or NULL if there's no
66 * partially-filled page. This page is not in SENTINEL's list. */
67 struct cache_page *partial_page;
68 /* If PARTIAL_PAGE is not null, this is the number of entries
69 * currently on PARTIAL_PAGE. */
70 apr_uint64_t partial_page_number_filled;
72 /* The pool that the svn_cache__t itself, HASH, and all pages are
73 * allocated in; subpools of this pool are used for the cache_entry
74 * structs, as well as the dup'd values and hash keys.
76 apr_pool_t *cache_pool;
78 /* Sum of the SIZE members of all cache_entry elements that are
79 * accessible from HASH. This is used to make statistics available
80 * even if the sub-pools have already been destroyed.
84 /* A lock for intra-process synchronization to the cache, or NULL if
85 * the cache's creator doesn't feel the cache needs to be
90 /* A cache page; all items on the page are allocated from the same
93 /* Pointers for the LRU list anchored at the cache's SENTINEL.
94 * (NULL for the PARTIAL_PAGE.) */
95 struct cache_page *prev;
96 struct cache_page *next;
98 /* The pool in which cache_entry structs, hash keys, and dup'd
99 * values are allocated. The CACHE_PAGE structs are allocated
100 * in CACHE_POOL and have the same lifetime as the cache itself.
101 * (The cache will never allocate more than TOTAL_PAGES page
102 * structs (inclusive of the sentinel) from CACHE_POOL.)
104 apr_pool_t *page_pool;
106 /* A singly linked list of the entries on this page; used to remove
107 * them from the cache's HASH before reusing the page. */
108 struct cache_entry *first_entry;
111 /* An cache entry. */
115 /* serialized value */
118 /* length of the serialized value in bytes */
121 /* The page it's on (needed so that the LRU list can be
123 struct cache_page *page;
125 /* Next entry on the page. */
126 struct cache_entry *next_entry;
130 /* Removes PAGE from the doubly-linked list it is in (leaving its PREV
131 * and NEXT fields undefined). */
133 remove_page_from_list(struct cache_page *page)
135 page->prev->next = page->next;
136 page->next->prev = page->prev;
139 /* Inserts PAGE after CACHE's sentinel. */
141 insert_page(inprocess_cache_t *cache,
142 struct cache_page *page)
144 struct cache_page *pred = cache->sentinel;
147 page->next = pred->next;
148 page->prev->next = page;
149 page->next->prev = page;
152 /* If PAGE is in the circularly linked list (eg, its NEXT isn't NULL),
153 * move it to the front of the list. */
155 move_page_to_front(inprocess_cache_t *cache,
156 struct cache_page *page)
158 /* This function is called whilst CACHE is locked. */
160 SVN_ERR_ASSERT(page != cache->sentinel);
165 remove_page_from_list(page);
166 insert_page(cache, page);
171 /* Return a copy of KEY inside POOL, using CACHE->KLEN to figure out
174 duplicate_key(inprocess_cache_t *cache,
178 if (cache->klen == APR_HASH_KEY_STRING)
179 return apr_pstrdup(pool, key);
181 return apr_pmemdup(pool, key, cache->klen);
185 inprocess_cache_get_internal(char **buffer,
187 inprocess_cache_t *cache,
189 apr_pool_t *result_pool)
191 struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
195 SVN_ERR(move_page_to_front(cache, entry->page));
197 /* duplicate the buffer entry */
198 *buffer = apr_palloc(result_pool, entry->size);
200 memcpy(*buffer, entry->value, entry->size);
214 inprocess_cache_get(void **value_p,
215 svn_boolean_t *found,
218 apr_pool_t *result_pool)
220 inprocess_cache_t *cache = cache_void;
227 SVN_MUTEX__WITH_LOCK(cache->mutex,
228 inprocess_cache_get_internal(&buffer,
233 /* deserialize the buffer content. Usually, this will directly
234 modify the buffer content directly. */
235 *found = (buffer != NULL);
236 if (!buffer || !size)
239 return cache->deserialize_func(value_p, buffer, size, result_pool);
251 inprocess_cache_has_key_internal(svn_boolean_t *found,
252 inprocess_cache_t *cache,
254 apr_pool_t *scratch_pool)
256 *found = apr_hash_get(cache->hash, key, cache->klen) != NULL;
262 inprocess_cache_has_key(svn_boolean_t *found,
265 apr_pool_t *scratch_pool)
267 inprocess_cache_t *cache = cache_void;
270 SVN_MUTEX__WITH_LOCK(cache->mutex,
271 inprocess_cache_has_key_internal(found,
281 /* Removes PAGE from the LRU list, removes all of its entries from
282 * CACHE's hash, clears its pool, and sets its entry pointer to NULL.
283 * Finally, puts it in the "partial page" slot in the cache and sets
284 * partial_page_number_filled to 0. Must be called on a page actually
287 erase_page(inprocess_cache_t *cache,
288 struct cache_page *page)
290 struct cache_entry *e;
292 remove_page_from_list(page);
294 for (e = page->first_entry;
298 cache->data_size -= e->size;
299 apr_hash_set(cache->hash, e->key, cache->klen, NULL);
302 svn_pool_clear(page->page_pool);
304 page->first_entry = NULL;
308 cache->partial_page = page;
309 cache->partial_page_number_filled = 0;
314 inprocess_cache_set_internal(inprocess_cache_t *cache,
317 apr_pool_t *scratch_pool)
319 struct cache_entry *existing_entry;
321 existing_entry = apr_hash_get(cache->hash, key, cache->klen);
323 /* Is it already here, but we can do the one-item-per-page
325 if (existing_entry && cache->items_per_page == 1)
327 /* Special case! ENTRY is the *only* entry on this page, so
328 * why not wipe it (so as not to leak the previous value).
330 struct cache_page *page = existing_entry->page;
332 /* This can't be the partial page: items_per_page == 1
333 * *never* has a partial page (except for in the temporary state
334 * that we're about to fake). */
335 SVN_ERR_ASSERT(page->next != NULL);
336 SVN_ERR_ASSERT(cache->partial_page == NULL);
338 erase_page(cache, page);
339 existing_entry = NULL;
342 /* Is it already here, and we just have to leak the old value? */
345 struct cache_page *page = existing_entry->page;
347 SVN_ERR(move_page_to_front(cache, page));
348 cache->data_size -= existing_entry->size;
351 SVN_ERR(cache->serialize_func(&existing_entry->value,
352 &existing_entry->size,
355 cache->data_size += existing_entry->size;
356 if (existing_entry->size == 0)
357 existing_entry->value = NULL;
361 existing_entry->value = NULL;
362 existing_entry->size = 0;
368 /* Do we not have a partial page to put it on, but we are allowed to
370 if (cache->partial_page == NULL && cache->unallocated_pages > 0)
372 cache->partial_page = apr_pcalloc(cache->cache_pool,
373 sizeof(*(cache->partial_page)));
374 cache->partial_page->page_pool = svn_pool_create(cache->cache_pool);
375 cache->partial_page_number_filled = 0;
376 (cache->unallocated_pages)--;
379 /* Do we really not have a partial page to put it on, even after the
380 * one-item-per-page optimization and checking the unallocated page
382 if (cache->partial_page == NULL)
384 struct cache_page *oldest_page = cache->sentinel->prev;
386 SVN_ERR_ASSERT(oldest_page != cache->sentinel);
388 /* Erase the page and put it in cache->partial_page. */
389 erase_page(cache, oldest_page);
392 SVN_ERR_ASSERT(cache->partial_page != NULL);
395 struct cache_page *page = cache->partial_page;
396 struct cache_entry *new_entry = apr_pcalloc(page->page_pool,
399 /* Copy the key and value into the page's pool. */
400 new_entry->key = duplicate_key(cache, key, page->page_pool);
403 SVN_ERR(cache->serialize_func(&new_entry->value,
407 cache->data_size += new_entry->size;
408 if (new_entry->size == 0)
409 new_entry->value = NULL;
413 new_entry->value = NULL;
417 /* Add the entry to the page's list. */
418 new_entry->page = page;
419 new_entry->next_entry = page->first_entry;
420 page->first_entry = new_entry;
422 /* Add the entry to the hash, using the *entry's* copy of the
424 apr_hash_set(cache->hash, new_entry->key, cache->klen, new_entry);
426 /* We've added something else to the partial page. */
427 (cache->partial_page_number_filled)++;
430 if (cache->partial_page_number_filled >= cache->items_per_page)
432 insert_page(cache, page);
433 cache->partial_page = NULL;
441 inprocess_cache_set(void *cache_void,
444 apr_pool_t *scratch_pool)
446 inprocess_cache_t *cache = cache_void;
449 SVN_MUTEX__WITH_LOCK(cache->mutex,
450 inprocess_cache_set_internal(cache,
458 /* Baton type for svn_cache__iter. */
459 struct cache_iter_baton {
460 svn_iter_apr_hash_cb_t user_cb;
464 /* Call the user's callback with the actual value, not the
465 cache_entry. Implements the svn_iter_apr_hash_cb_t
474 struct cache_iter_baton *b = baton;
475 struct cache_entry *entry = val;
476 return (b->user_cb)(b->user_baton, key, klen, entry->value, pool);
480 inprocess_cache_iter(svn_boolean_t *completed,
482 svn_iter_apr_hash_cb_t user_cb,
484 apr_pool_t *scratch_pool)
486 inprocess_cache_t *cache = cache_void;
487 struct cache_iter_baton b;
489 b.user_baton = user_baton;
491 SVN_MUTEX__WITH_LOCK(cache->mutex,
492 svn_iter_apr_hash(completed, cache->hash,
493 iter_cb, &b, scratch_pool));
499 inprocess_cache_get_partial_internal(void **value_p,
500 svn_boolean_t *found,
501 inprocess_cache_t *cache,
503 svn_cache__partial_getter_func_t func,
505 apr_pool_t *result_pool)
507 struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
514 SVN_ERR(move_page_to_front(cache, entry->page));
517 return func(value_p, entry->value, entry->size, baton, result_pool);
521 inprocess_cache_get_partial(void **value_p,
522 svn_boolean_t *found,
525 svn_cache__partial_getter_func_t func,
527 apr_pool_t *result_pool)
529 inprocess_cache_t *cache = cache_void;
532 SVN_MUTEX__WITH_LOCK(cache->mutex,
533 inprocess_cache_get_partial_internal(value_p,
547 inprocess_cache_set_partial_internal(inprocess_cache_t *cache,
549 svn_cache__partial_setter_func_t func,
551 apr_pool_t *scratch_pool)
553 struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
556 SVN_ERR(move_page_to_front(cache, entry->page));
558 cache->data_size -= entry->size;
559 SVN_ERR(func(&entry->value,
562 entry->page->page_pool));
563 cache->data_size += entry->size;
570 inprocess_cache_set_partial(void *cache_void,
572 svn_cache__partial_setter_func_t func,
574 apr_pool_t *scratch_pool)
576 inprocess_cache_t *cache = cache_void;
579 SVN_MUTEX__WITH_LOCK(cache->mutex,
580 inprocess_cache_set_partial_internal(cache,
590 inprocess_cache_is_cachable(void *cache_void, apr_size_t size)
592 /* Be relatively strict: per page we should not allocate more than
593 * we could spare as "unused" memory.
594 * But in most cases, nobody will ask anyway. And in no case, this
595 * will be used for checks _inside_ the cache code.
597 inprocess_cache_t *cache = cache_void;
598 return size < SVN_ALLOCATOR_RECOMMENDED_MAX_FREE / cache->items_per_page;
602 inprocess_cache_get_info_internal(inprocess_cache_t *cache,
603 svn_cache__info_t *info,
604 apr_pool_t *result_pool)
606 info->id = apr_pstrdup(result_pool, cache->id);
608 info->used_entries = apr_hash_count(cache->hash);
609 info->total_entries = cache->items_per_page * cache->total_pages;
611 info->used_size = cache->data_size;
612 info->data_size = cache->data_size;
613 info->total_size = cache->data_size
614 + cache->items_per_page * sizeof(struct cache_page)
615 + info->used_entries * sizeof(struct cache_entry);
622 inprocess_cache_get_info(void *cache_void,
623 svn_cache__info_t *info,
625 apr_pool_t *result_pool)
627 inprocess_cache_t *cache = cache_void;
629 SVN_MUTEX__WITH_LOCK(cache->mutex,
630 inprocess_cache_get_info_internal(cache,
637 static svn_cache__vtable_t inprocess_cache_vtable = {
639 inprocess_cache_has_key,
641 inprocess_cache_iter,
642 inprocess_cache_is_cachable,
643 inprocess_cache_get_partial,
644 inprocess_cache_set_partial,
645 inprocess_cache_get_info
649 svn_cache__create_inprocess(svn_cache__t **cache_p,
650 svn_cache__serialize_func_t serialize,
651 svn_cache__deserialize_func_t deserialize,
654 apr_int64_t items_per_page,
655 svn_boolean_t thread_safe,
659 svn_cache__t *wrapper = apr_pcalloc(pool, sizeof(*wrapper));
660 inprocess_cache_t *cache = apr_pcalloc(pool, sizeof(*cache));
662 cache->id = apr_pstrdup(pool, id);
664 SVN_ERR_ASSERT(klen == APR_HASH_KEY_STRING || klen >= 1);
666 cache->hash = apr_hash_make(pool);
669 cache->serialize_func = serialize;
670 cache->deserialize_func = deserialize;
672 SVN_ERR_ASSERT(pages >= 1);
673 cache->total_pages = pages;
674 cache->unallocated_pages = pages;
675 SVN_ERR_ASSERT(items_per_page >= 1);
676 cache->items_per_page = items_per_page;
678 cache->sentinel = apr_pcalloc(pool, sizeof(*(cache->sentinel)));
679 cache->sentinel->prev = cache->sentinel;
680 cache->sentinel->next = cache->sentinel;
681 /* The sentinel doesn't need a pool. (We're happy to crash if we
682 * accidentally try to treat it like a real page.) */
684 SVN_ERR(svn_mutex__init(&cache->mutex, thread_safe, pool));
686 cache->cache_pool = pool;
688 wrapper->vtable = &inprocess_cache_vtable;
689 wrapper->cache_internal = cache;
690 wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");