2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
33 #include "cacheplcs.h"
36 static void cache_fifo_policy_update_item(struct cache_policy_ *,
37 struct cache_policy_item_ *);
38 static void cache_lfu_policy_add_item(struct cache_policy_ *,
39 struct cache_policy_item_ *);
40 static struct cache_policy_item_ * cache_lfu_policy_create_item(void);
41 static void cache_lfu_policy_destroy_item(struct cache_policy_item_ *);
42 static struct cache_policy_item_ *cache_lfu_policy_get_first_item(
43 struct cache_policy_ *);
44 static struct cache_policy_item_ *cache_lfu_policy_get_last_item(
45 struct cache_policy_ *);
46 static struct cache_policy_item_ *cache_lfu_policy_get_next_item(
47 struct cache_policy_ *, struct cache_policy_item_ *);
48 static struct cache_policy_item_ *cache_lfu_policy_get_prev_item(
49 struct cache_policy_ *, struct cache_policy_item_ *);
50 static void cache_lfu_policy_remove_item(struct cache_policy_ *,
51 struct cache_policy_item_ *);
52 static void cache_lfu_policy_update_item(struct cache_policy_ *,
53 struct cache_policy_item_ *);
54 static void cache_lru_policy_update_item(struct cache_policy_ *,
55 struct cache_policy_item_ *);
56 static void cache_queue_policy_add_item(struct cache_policy_ *,
57 struct cache_policy_item_ *);
58 static struct cache_policy_item_ * cache_queue_policy_create_item();
59 static void cache_queue_policy_destroy_item(struct cache_policy_item_ *);
60 static struct cache_policy_item_ *cache_queue_policy_get_first_item(
61 struct cache_policy_ *);
62 static struct cache_policy_item_ *cache_queue_policy_get_last_item(
63 struct cache_policy_ *);
64 static struct cache_policy_item_ *cache_queue_policy_get_next_item(
65 struct cache_policy_ *, struct cache_policy_item_ *);
66 static struct cache_policy_item_ *cache_queue_policy_get_prev_item(
67 struct cache_policy_ *, struct cache_policy_item_ *);
68 static void cache_queue_policy_remove_item(struct cache_policy_ *,
69 struct cache_policy_item_ *);
70 static void destroy_cache_queue_policy(struct cache_queue_policy_ *);
71 static struct cache_queue_policy_ *init_cache_queue_policy(void);
 * All cache_queue_policy_XXX functions below are used to fill
 * the cache_queue_policy structure. They implement most of the
 * functionality of the LRU and FIFO policies. LRU and FIFO are actually the
 * cache_queue_policy_ with only the cache_update_item function changed.
79 static struct cache_policy_item_ *
80 cache_queue_policy_create_item()
82 struct cache_queue_policy_item_ *retval;
84 TRACE_IN(cache_queue_policy_create_item);
85 retval = (struct cache_queue_policy_item_ *)malloc(
86 sizeof(struct cache_queue_policy_item_));
87 assert(retval != NULL);
88 memset(retval, 0, sizeof(struct cache_queue_policy_item_));
90 TRACE_OUT(cache_queue_policy_create_item);
91 return ((struct cache_policy_item_ *)retval);
95 cache_queue_policy_destroy_item(struct cache_policy_item_ *item)
98 TRACE_IN(cache_queue_policy_destroy_item);
101 TRACE_OUT(cache_queue_policy_destroy_item);
105 cache_queue_policy_add_item(struct cache_policy_ *policy,
106 struct cache_policy_item_ *item)
108 struct cache_queue_policy_ *queue_policy;
109 struct cache_queue_policy_item_ *queue_item;
111 TRACE_IN(cache_queue_policy_add_item);
112 queue_policy = (struct cache_queue_policy_ *)policy;
113 queue_item = (struct cache_queue_policy_item_ *)item;
114 TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
115 TRACE_OUT(cache_queue_policy_add_item);
119 cache_queue_policy_remove_item(struct cache_policy_ *policy,
120 struct cache_policy_item_ *item)
122 struct cache_queue_policy_ *queue_policy;
123 struct cache_queue_policy_item_ *queue_item;
125 TRACE_IN(cache_queue_policy_remove_item);
126 queue_policy = (struct cache_queue_policy_ *)policy;
127 queue_item = (struct cache_queue_policy_item_ *)item;
128 TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
129 TRACE_OUT(cache_queue_policy_remove_item);
132 static struct cache_policy_item_ *
133 cache_queue_policy_get_first_item(struct cache_policy_ *policy)
135 struct cache_queue_policy_ *queue_policy;
137 TRACE_IN(cache_queue_policy_get_first_item);
138 queue_policy = (struct cache_queue_policy_ *)policy;
139 TRACE_OUT(cache_queue_policy_get_first_item);
140 return ((struct cache_policy_item_ *)TAILQ_FIRST(&queue_policy->head));
143 static struct cache_policy_item_ *
144 cache_queue_policy_get_last_item(struct cache_policy_ *policy)
146 struct cache_queue_policy_ *queue_policy;
148 TRACE_IN(cache_queue_policy_get_last_item);
149 queue_policy = (struct cache_queue_policy_ *)policy;
150 TRACE_OUT(cache_queue_policy_get_last_item);
151 return ((struct cache_policy_item_ *)TAILQ_LAST(&queue_policy->head,
152 cache_queue_policy_head_));
155 static struct cache_policy_item_ *
156 cache_queue_policy_get_next_item(struct cache_policy_ *policy,
157 struct cache_policy_item_ *item)
159 struct cache_queue_policy_ *queue_policy;
160 struct cache_queue_policy_item_ *queue_item;
162 TRACE_IN(cache_queue_policy_get_next_item);
163 queue_policy = (struct cache_queue_policy_ *)policy;
164 queue_item = (struct cache_queue_policy_item_ *)item;
166 TRACE_OUT(cache_queue_policy_get_next_item);
167 return ((struct cache_policy_item_ *)TAILQ_NEXT(queue_item, entries));
170 static struct cache_policy_item_ *
171 cache_queue_policy_get_prev_item(struct cache_policy_ *policy,
172 struct cache_policy_item_ *item)
174 struct cache_queue_policy_ *queue_policy;
175 struct cache_queue_policy_item_ *queue_item;
177 TRACE_IN(cache_queue_policy_get_prev_item);
178 queue_policy = (struct cache_queue_policy_ *)policy;
179 queue_item = (struct cache_queue_policy_item_ *)item;
181 TRACE_OUT(cache_queue_policy_get_prev_item);
182 return ((struct cache_policy_item_ *)TAILQ_PREV(queue_item,
183 cache_queue_policy_head_, entries));
187 * Initializes cache_queue_policy_ by filling the structure with the functions
188 * pointers, defined above
190 static struct cache_queue_policy_ *
191 init_cache_queue_policy(void)
193 struct cache_queue_policy_ *retval;
195 TRACE_IN(init_cache_queue_policy);
196 retval = (struct cache_queue_policy_ *)malloc(
197 sizeof(struct cache_queue_policy_));
198 assert(retval != NULL);
199 memset(retval, 0, sizeof(struct cache_queue_policy_));
201 retval->parent_data.create_item_func = cache_queue_policy_create_item;
202 retval->parent_data.destroy_item_func = cache_queue_policy_destroy_item;
204 retval->parent_data.add_item_func = cache_queue_policy_add_item;
205 retval->parent_data.remove_item_func = cache_queue_policy_remove_item;
207 retval->parent_data.get_first_item_func =
208 cache_queue_policy_get_first_item;
209 retval->parent_data.get_last_item_func =
210 cache_queue_policy_get_last_item;
211 retval->parent_data.get_next_item_func =
212 cache_queue_policy_get_next_item;
213 retval->parent_data.get_prev_item_func =
214 cache_queue_policy_get_prev_item;
216 TAILQ_INIT(&retval->head);
217 TRACE_OUT(init_cache_queue_policy);
222 destroy_cache_queue_policy(struct cache_queue_policy_ *queue_policy)
224 struct cache_queue_policy_item_ *queue_item;
226 TRACE_IN(destroy_cache_queue_policy);
227 while (!TAILQ_EMPTY(&queue_policy->head)) {
228 queue_item = TAILQ_FIRST(&queue_policy->head);
229 TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
230 cache_queue_policy_destroy_item(
231 (struct cache_policy_item_ *)queue_item);
234 TRACE_OUT(destroy_cache_queue_policy);
238 * Makes cache_queue_policy_ behave like FIFO policy - we don't do anything,
239 * when the cache element is updated. So it always stays in its initial
240 * position in the queue - that is exactly the FIFO functionality.
243 cache_fifo_policy_update_item(struct cache_policy_ *policy,
244 struct cache_policy_item_ *item)
247 TRACE_IN(cache_fifo_policy_update_item);
248 /* policy and item arguments are ignored */
249 TRACE_OUT(cache_fifo_policy_update_item);
252 struct cache_policy_ *
253 init_cache_fifo_policy()
255 struct cache_queue_policy_ *retval;
257 TRACE_IN(init_cache_fifo_policy);
258 retval = init_cache_queue_policy();
259 retval->parent_data.update_item_func = cache_fifo_policy_update_item;
261 TRACE_OUT(init_cache_fifo_policy);
262 return ((struct cache_policy_ *)retval);
266 destroy_cache_fifo_policy(struct cache_policy_ *policy)
268 struct cache_queue_policy_ *queue_policy;
270 TRACE_IN(destroy_cache_fifo_policy);
271 queue_policy = (struct cache_queue_policy_ *)policy;
272 destroy_cache_queue_policy(queue_policy);
273 TRACE_OUT(destroy_cache_fifo_policy);
277 * Makes cache_queue_policy_ behave like LRU policy. On each update, cache
278 * element is moved to the end of the queue - so it would be deleted in last
279 * turn. That is exactly the LRU policy functionality.
282 cache_lru_policy_update_item(struct cache_policy_ *policy,
283 struct cache_policy_item_ *item)
285 struct cache_queue_policy_ *queue_policy;
286 struct cache_queue_policy_item_ *queue_item;
288 TRACE_IN(cache_lru_policy_update_item);
289 queue_policy = (struct cache_queue_policy_ *)policy;
290 queue_item = (struct cache_queue_policy_item_ *)item;
292 TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
293 TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
294 TRACE_OUT(cache_lru_policy_update_item);
297 struct cache_policy_ *
298 init_cache_lru_policy()
300 struct cache_queue_policy_ *retval;
302 TRACE_IN(init_cache_lru_policy);
303 retval = init_cache_queue_policy();
304 retval->parent_data.update_item_func = cache_lru_policy_update_item;
306 TRACE_OUT(init_cache_lru_policy);
307 return ((struct cache_policy_ *)retval);
311 destroy_cache_lru_policy(struct cache_policy_ *policy)
313 struct cache_queue_policy_ *queue_policy;
315 TRACE_IN(destroy_cache_lru_policy);
316 queue_policy = (struct cache_queue_policy_ *)policy;
317 destroy_cache_queue_policy(queue_policy);
318 TRACE_OUT(destroy_cache_lru_policy);
 * The LFU (least frequently used) policy implementation differs greatly from
 * LRU and FIFO (both based on cache_queue_policy_). Almost all cache_policy_
 * functions are implemented specifically for this policy. The idea of this
 * policy is to map the frequency (a real number) to an integer and use it as
 * an index into an array. Each array element is a list of items. For example,
 * with a 100-element array, the items with frequency 0.1 (calls per second)
 * would be in the 10th element of the array.
331 static struct cache_policy_item_ *
332 cache_lfu_policy_create_item(void)
334 struct cache_lfu_policy_item_ *retval;
336 TRACE_IN(cache_lfu_policy_create_item);
337 retval = (struct cache_lfu_policy_item_ *)malloc(
338 sizeof(struct cache_lfu_policy_item_));
339 assert(retval != NULL);
340 memset(retval, 0, sizeof(struct cache_lfu_policy_item_));
342 TRACE_OUT(cache_lfu_policy_create_item);
343 return ((struct cache_policy_item_ *)retval);
347 cache_lfu_policy_destroy_item(struct cache_policy_item_ *item)
350 TRACE_IN(cache_lfu_policy_destroy_item);
351 assert(item != NULL);
353 TRACE_OUT(cache_lfu_policy_destroy_item);
357 * When placed in the LFU policy queue for the first time, the maximum
358 * frequency is assigned to the element
361 cache_lfu_policy_add_item(struct cache_policy_ *policy,
362 struct cache_policy_item_ *item)
364 struct cache_lfu_policy_ *lfu_policy;
365 struct cache_lfu_policy_item_ *lfu_item;
367 TRACE_IN(cache_lfu_policy_add_item);
368 lfu_policy = (struct cache_lfu_policy_ *)policy;
369 lfu_item = (struct cache_lfu_policy_item_ *)item;
371 lfu_item->frequency = CACHELIB_MAX_FREQUENCY - 1;
372 TAILQ_INSERT_HEAD(&(lfu_policy->groups[CACHELIB_MAX_FREQUENCY - 1]),
374 TRACE_OUT(cache_lfu_policy_add_item);
378 * On each update the frequency of the element is recalculated and, if it
379 * changed, the element would be moved to the another place in the array.
382 cache_lfu_policy_update_item(struct cache_policy_ *policy,
383 struct cache_policy_item_ *item)
385 struct cache_lfu_policy_ *lfu_policy;
386 struct cache_lfu_policy_item_ *lfu_item;
389 TRACE_IN(cache_lfu_policy_update_item);
390 lfu_policy = (struct cache_lfu_policy_ *)policy;
391 lfu_item = (struct cache_lfu_policy_item_ *)item;
394 * We calculate the square of the request_count to avoid grouping of
395 * all elements at the start of the array (for example, if array size is
396 * 100 and most of its elements has frequency below the 0.01, they
397 * all would be grouped in the first array's position). Other
398 * techniques should be used here later to ensure, that elements are
399 * equally distributed in the array and not grouped in its beginning.
401 if (lfu_item->parent_data.last_request_time.tv_sec !=
402 lfu_item->parent_data.creation_time.tv_sec) {
403 index = ((double)lfu_item->parent_data.request_count *
404 (double)lfu_item->parent_data.request_count /
405 (lfu_item->parent_data.last_request_time.tv_sec -
406 lfu_item->parent_data.creation_time.tv_sec + 1)) *
407 CACHELIB_MAX_FREQUENCY;
408 if (index >= CACHELIB_MAX_FREQUENCY)
409 index = CACHELIB_MAX_FREQUENCY - 1;
411 index = CACHELIB_MAX_FREQUENCY - 1;
413 TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
415 lfu_item->frequency = index;
416 TAILQ_INSERT_HEAD(&(lfu_policy->groups[index]), lfu_item, entries);
418 TRACE_OUT(cache_lfu_policy_update_item);
422 cache_lfu_policy_remove_item(struct cache_policy_ *policy,
423 struct cache_policy_item_ *item)
425 struct cache_lfu_policy_ *lfu_policy;
426 struct cache_lfu_policy_item_ *lfu_item;
428 TRACE_IN(cache_lfu_policy_remove_item);
429 lfu_policy = (struct cache_lfu_policy_ *)policy;
430 lfu_item = (struct cache_lfu_policy_item_ *)item;
432 TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
434 TRACE_OUT(cache_lfu_policy_remove_item);
437 static struct cache_policy_item_ *
438 cache_lfu_policy_get_first_item(struct cache_policy_ *policy)
440 struct cache_lfu_policy_ *lfu_policy;
441 struct cache_lfu_policy_item_ *lfu_item;
444 TRACE_IN(cache_lfu_policy_get_first_item);
446 lfu_policy = (struct cache_lfu_policy_ *)policy;
447 for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
448 if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
449 lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
453 TRACE_OUT(cache_lfu_policy_get_first_item);
454 return ((struct cache_policy_item_ *)lfu_item);
457 static struct cache_policy_item_ *
458 cache_lfu_policy_get_last_item(struct cache_policy_ *policy)
460 struct cache_lfu_policy_ *lfu_policy;
461 struct cache_lfu_policy_item_ *lfu_item;
464 TRACE_IN(cache_lfu_policy_get_last_item);
466 lfu_policy = (struct cache_lfu_policy_ *)policy;
467 for (i = CACHELIB_MAX_FREQUENCY - 1; i >= 0; --i)
468 if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
469 lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
470 cache_lfu_policy_group_);
474 TRACE_OUT(cache_lfu_policy_get_last_item);
475 return ((struct cache_policy_item_ *)lfu_item);
478 static struct cache_policy_item_ *
479 cache_lfu_policy_get_next_item(struct cache_policy_ *policy,
480 struct cache_policy_item_ *item)
482 struct cache_lfu_policy_ *lfu_policy;
483 struct cache_lfu_policy_item_ *lfu_item;
486 TRACE_IN(cache_lfu_policy_get_next_item);
487 lfu_policy = (struct cache_lfu_policy_ *)policy;
488 lfu_item = TAILQ_NEXT((struct cache_lfu_policy_item_ *)item, entries);
489 if (lfu_item == NULL)
491 for (i = ((struct cache_lfu_policy_item_ *)item)->frequency + 1;
492 i < CACHELIB_MAX_FREQUENCY; ++i) {
493 if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
494 lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
500 TRACE_OUT(cache_lfu_policy_get_next_item);
501 return ((struct cache_policy_item_ *)lfu_item);
504 static struct cache_policy_item_ *
505 cache_lfu_policy_get_prev_item(struct cache_policy_ *policy,
506 struct cache_policy_item_ *item)
508 struct cache_lfu_policy_ *lfu_policy;
509 struct cache_lfu_policy_item_ *lfu_item;
512 TRACE_IN(cache_lfu_policy_get_prev_item);
513 lfu_policy = (struct cache_lfu_policy_ *)policy;
514 lfu_item = TAILQ_PREV((struct cache_lfu_policy_item_ *)item,
515 cache_lfu_policy_group_, entries);
516 if (lfu_item == NULL)
518 for (i = ((struct cache_lfu_policy_item_ *)item)->frequency - 1;
520 if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
521 lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
522 cache_lfu_policy_group_);
527 TRACE_OUT(cache_lfu_policy_get_prev_item);
528 return ((struct cache_policy_item_ *)lfu_item);
532 * Initializes the cache_policy_ structure by filling it with appropriate
535 struct cache_policy_ *
536 init_cache_lfu_policy()
539 struct cache_lfu_policy_ *retval;
541 TRACE_IN(init_cache_lfu_policy);
542 retval = (struct cache_lfu_policy_ *)malloc(
543 sizeof(struct cache_lfu_policy_));
544 assert(retval != NULL);
545 memset(retval, 0, sizeof(struct cache_lfu_policy_));
547 retval->parent_data.create_item_func = cache_lfu_policy_create_item;
548 retval->parent_data.destroy_item_func = cache_lfu_policy_destroy_item;
550 retval->parent_data.add_item_func = cache_lfu_policy_add_item;
551 retval->parent_data.update_item_func = cache_lfu_policy_update_item;
552 retval->parent_data.remove_item_func = cache_lfu_policy_remove_item;
554 retval->parent_data.get_first_item_func =
555 cache_lfu_policy_get_first_item;
556 retval->parent_data.get_last_item_func =
557 cache_lfu_policy_get_last_item;
558 retval->parent_data.get_next_item_func =
559 cache_lfu_policy_get_next_item;
560 retval->parent_data.get_prev_item_func =
561 cache_lfu_policy_get_prev_item;
563 for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
564 TAILQ_INIT(&(retval->groups[i]));
566 TRACE_OUT(init_cache_lfu_policy);
567 return ((struct cache_policy_ *)retval);
571 destroy_cache_lfu_policy(struct cache_policy_ *policy)
574 struct cache_lfu_policy_ *lfu_policy;
575 struct cache_lfu_policy_item_ *lfu_item;
577 TRACE_IN(destroy_cache_lfu_policy);
578 lfu_policy = (struct cache_lfu_policy_ *)policy;
579 for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i) {
580 while (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
581 lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
582 TAILQ_REMOVE(&(lfu_policy->groups[i]), lfu_item,
584 cache_lfu_policy_destroy_item(
585 (struct cache_policy_item_ *)lfu_item);
589 TRACE_OUT(destroy_cache_lfu_policy);