/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "cacheplcs.h"
#include "debug.h"

static void cache_fifo_policy_update_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static void cache_lfu_policy_add_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_lfu_policy_create_item(void);
static void cache_lfu_policy_destroy_item(struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_first_item(
        struct cache_policy_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_last_item(
        struct cache_policy_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_next_item(
        struct cache_policy_ *, struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_prev_item(
        struct cache_policy_ *, struct cache_policy_item_ *);
static void cache_lfu_policy_remove_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static void cache_lfu_policy_update_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static void cache_lru_policy_update_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static void cache_queue_policy_add_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_queue_policy_create_item(void);
static void cache_queue_policy_destroy_item(struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_queue_policy_get_first_item(
        struct cache_policy_ *);
static struct cache_policy_item_ *cache_queue_policy_get_last_item(
        struct cache_policy_ *);
static struct cache_policy_item_ *cache_queue_policy_get_next_item(
        struct cache_policy_ *, struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_queue_policy_get_prev_item(
        struct cache_policy_ *, struct cache_policy_item_ *);
static void cache_queue_policy_remove_item(struct cache_policy_ *,
        struct cache_policy_item_ *);
static void destroy_cache_queue_policy(struct cache_queue_policy_ *);
static struct cache_queue_policy_ *init_cache_queue_policy(void);

/*
 * The cache_queue_policy_XXX functions below are used to fill the
 * cache_queue_policy_ structure.  They implement most of the functionality
 * of the LRU and FIFO policies, which are simply a cache_queue_policy_ with
 * a different update_item_func.
 */
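/*
 * A rough usage sketch (illustrative only, not taken from this file): the
 * caching code is expected to drive a policy purely through the function
 * pointers that the init_* routines below fill in.  The names "pol" and
 * "it" are hypothetical.
 *
 *	struct cache_policy_ *pol = init_cache_fifo_policy();
 *	struct cache_policy_item_ *it = pol->create_item_func();
 *	pol->add_item_func(pol, it);	 (item enters the queue)
 *	pol->update_item_func(pol, it);	 (called when the cached item is used)
 *	 (the head of the queue is the first candidate for deletion)
 *	struct cache_policy_item_ *victim = pol->get_first_item_func(pol);
 *	pol->remove_item_func(pol, victim);
 *	pol->destroy_item_func(victim);
 *	destroy_cache_fifo_policy(pol);
 */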
static struct cache_policy_item_ *
cache_queue_policy_create_item(void)
{
        struct cache_queue_policy_item_ *retval;

        TRACE_IN(cache_queue_policy_create_item);
        retval = (struct cache_queue_policy_item_ *)malloc(
                sizeof(struct cache_queue_policy_item_));
        assert(retval != NULL);
        memset(retval, 0, sizeof(struct cache_queue_policy_item_));

        TRACE_OUT(cache_queue_policy_create_item);
        return ((struct cache_policy_item_ *)retval);
}

static void
cache_queue_policy_destroy_item(struct cache_policy_item_ *item)
{

        TRACE_IN(cache_queue_policy_destroy_item);
        assert(item != NULL);
        free(item);
        TRACE_OUT(cache_queue_policy_destroy_item);
}

static void
cache_queue_policy_add_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_queue_policy_ *queue_policy;
        struct cache_queue_policy_item_ *queue_item;

        TRACE_IN(cache_queue_policy_add_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        queue_item = (struct cache_queue_policy_item_ *)item;
        TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
        TRACE_OUT(cache_queue_policy_add_item);
}

static void
cache_queue_policy_remove_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_queue_policy_ *queue_policy;
        struct cache_queue_policy_item_ *queue_item;

        TRACE_IN(cache_queue_policy_remove_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        queue_item = (struct cache_queue_policy_item_ *)item;
        TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
        TRACE_OUT(cache_queue_policy_remove_item);
}

static struct cache_policy_item_ *
cache_queue_policy_get_first_item(struct cache_policy_ *policy)
{
        struct cache_queue_policy_ *queue_policy;

        TRACE_IN(cache_queue_policy_get_first_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        TRACE_OUT(cache_queue_policy_get_first_item);
        return ((struct cache_policy_item_ *)TAILQ_FIRST(&queue_policy->head));
}

static struct cache_policy_item_ *
cache_queue_policy_get_last_item(struct cache_policy_ *policy)
{
        struct cache_queue_policy_ *queue_policy;

        TRACE_IN(cache_queue_policy_get_last_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        TRACE_OUT(cache_queue_policy_get_last_item);
        return ((struct cache_policy_item_ *)TAILQ_LAST(&queue_policy->head,
                cache_queue_policy_head_));
}

static struct cache_policy_item_ *
cache_queue_policy_get_next_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_queue_policy_ *queue_policy;
        struct cache_queue_policy_item_ *queue_item;

        TRACE_IN(cache_queue_policy_get_next_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        queue_item = (struct cache_queue_policy_item_ *)item;

        TRACE_OUT(cache_queue_policy_get_next_item);
        return ((struct cache_policy_item_ *)TAILQ_NEXT(queue_item, entries));
}

static struct cache_policy_item_ *
cache_queue_policy_get_prev_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_queue_policy_ *queue_policy;
        struct cache_queue_policy_item_ *queue_item;

        TRACE_IN(cache_queue_policy_get_prev_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        queue_item = (struct cache_queue_policy_item_ *)item;

        TRACE_OUT(cache_queue_policy_get_prev_item);
        return ((struct cache_policy_item_ *)TAILQ_PREV(queue_item,
                cache_queue_policy_head_, entries));
}

/*
 * Initializes cache_queue_policy_ by filling the structure with the
 * function pointers defined above.
 */
static struct cache_queue_policy_ *
init_cache_queue_policy(void)
{
        struct cache_queue_policy_      *retval;

        TRACE_IN(init_cache_queue_policy);
        retval = (struct cache_queue_policy_ *)malloc(
                sizeof(struct cache_queue_policy_));
        assert(retval != NULL);
        memset(retval, 0, sizeof(struct cache_queue_policy_));

        retval->parent_data.create_item_func = cache_queue_policy_create_item;
        retval->parent_data.destroy_item_func = cache_queue_policy_destroy_item;

        retval->parent_data.add_item_func = cache_queue_policy_add_item;
        retval->parent_data.remove_item_func = cache_queue_policy_remove_item;

        retval->parent_data.get_first_item_func =
                cache_queue_policy_get_first_item;
        retval->parent_data.get_last_item_func =
                cache_queue_policy_get_last_item;
        retval->parent_data.get_next_item_func =
                cache_queue_policy_get_next_item;
        retval->parent_data.get_prev_item_func =
                cache_queue_policy_get_prev_item;

        TAILQ_INIT(&retval->head);
        TRACE_OUT(init_cache_queue_policy);
        return (retval);
}

static void
destroy_cache_queue_policy(struct cache_queue_policy_ *queue_policy)
{
        struct cache_queue_policy_item_ *queue_item;

        TRACE_IN(destroy_cache_queue_policy);
        while (!TAILQ_EMPTY(&queue_policy->head)) {
                queue_item = TAILQ_FIRST(&queue_policy->head);
                TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
                cache_queue_policy_destroy_item(
                        (struct cache_policy_item_ *)queue_item);
        }
        free(queue_policy);
        TRACE_OUT(destroy_cache_queue_policy);
}

/*
 * Makes cache_queue_policy_ behave like a FIFO policy: nothing is done when
 * a cache element is updated, so the element keeps its initial position in
 * the queue, which is exactly the FIFO behavior.
 */
static void
cache_fifo_policy_update_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{

        TRACE_IN(cache_fifo_policy_update_item);
        /* policy and item arguments are ignored */
        TRACE_OUT(cache_fifo_policy_update_item);
}

struct cache_policy_ *
init_cache_fifo_policy(void)
{
        struct cache_queue_policy_ *retval;

        TRACE_IN(init_cache_fifo_policy);
        retval = init_cache_queue_policy();
        retval->parent_data.update_item_func = cache_fifo_policy_update_item;

        TRACE_OUT(init_cache_fifo_policy);
        return ((struct cache_policy_ *)retval);
}

void
destroy_cache_fifo_policy(struct cache_policy_ *policy)
{
        struct cache_queue_policy_      *queue_policy;

        TRACE_IN(destroy_cache_fifo_policy);
        queue_policy = (struct cache_queue_policy_ *)policy;
        destroy_cache_queue_policy(queue_policy);
        TRACE_OUT(destroy_cache_fifo_policy);
}

/*
 * Makes cache_queue_policy_ behave like an LRU policy.  On each update the
 * cache element is moved to the end of the queue, so it will be among the
 * last to be deleted.  That is exactly the LRU behavior.
 */
static void
cache_lru_policy_update_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_queue_policy_ *queue_policy;
        struct cache_queue_policy_item_ *queue_item;

        TRACE_IN(cache_lru_policy_update_item);
        queue_policy = (struct cache_queue_policy_ *)policy;
        queue_item = (struct cache_queue_policy_item_ *)item;

        TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
        TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
        TRACE_OUT(cache_lru_policy_update_item);
}

struct cache_policy_ *
init_cache_lru_policy(void)
{
        struct cache_queue_policy_ *retval;

        TRACE_IN(init_cache_lru_policy);
        retval = init_cache_queue_policy();
        retval->parent_data.update_item_func = cache_lru_policy_update_item;

        TRACE_OUT(init_cache_lru_policy);
        return ((struct cache_policy_ *)retval);
}

void
destroy_cache_lru_policy(struct cache_policy_ *policy)
{
        struct cache_queue_policy_      *queue_policy;

        TRACE_IN(destroy_cache_lru_policy);
        queue_policy = (struct cache_queue_policy_ *)policy;
        destroy_cache_queue_policy(queue_policy);
        TRACE_OUT(destroy_cache_lru_policy);
}

/*
 * The LFU (least frequently used) policy implementation differs a lot from
 * LRU and FIFO (both based on cache_queue_policy_): almost all cache_policy_
 * functions are implemented specifically for it.  The idea is to map the
 * frequency of an element (a real number) to an integer and use that integer
 * as an index into an array, where each array slot holds a list of elements.
 * For example, with a 100-element array, elements with a frequency of 0.1
 * (calls per second) would end up in the 10th slot of the array.
 */
static struct cache_policy_item_ *
cache_lfu_policy_create_item(void)
{
        struct cache_lfu_policy_item_ *retval;

        TRACE_IN(cache_lfu_policy_create_item);
        retval = (struct cache_lfu_policy_item_ *)malloc(
                sizeof(struct cache_lfu_policy_item_));
        assert(retval != NULL);
        memset(retval, 0, sizeof(struct cache_lfu_policy_item_));

        TRACE_OUT(cache_lfu_policy_create_item);
        return ((struct cache_policy_item_ *)retval);
}

static void
cache_lfu_policy_destroy_item(struct cache_policy_item_ *item)
{

        TRACE_IN(cache_lfu_policy_destroy_item);
        assert(item != NULL);
        free(item);
        TRACE_OUT(cache_lfu_policy_destroy_item);
}

/*
 * When an element is placed into the LFU policy queue for the first time,
 * it is assigned the maximum frequency.
 */
static void
cache_lfu_policy_add_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;

        TRACE_IN(cache_lfu_policy_add_item);
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        lfu_item = (struct cache_lfu_policy_item_ *)item;

        lfu_item->frequency = CACHELIB_MAX_FREQUENCY - 1;
        TAILQ_INSERT_HEAD(&(lfu_policy->groups[CACHELIB_MAX_FREQUENCY - 1]),
                lfu_item, entries);
        TRACE_OUT(cache_lfu_policy_add_item);
}

/*
 * On each update the frequency of the element is recalculated and, if it
 * has changed, the element is moved to another place in the array.
 */
static void
cache_lfu_policy_update_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;
        int index;

        TRACE_IN(cache_lfu_policy_update_item);
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        lfu_item = (struct cache_lfu_policy_item_ *)item;

        /*
         * We use the square of the request_count to avoid grouping all
         * elements at the start of the array (for example, if the array size
         * is 100 and most elements have a frequency below 0.01, they would
         * all end up in the first slot of the array).  Other techniques
         * should be used here later to ensure that elements are distributed
         * evenly across the array rather than clustered at its beginning.
         */
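        /*
         * Worked example (illustrative; the real CACHELIB_MAX_FREQUENCY
         * value is defined in cacheplcs.h): assume a 100-group array and an
         * item that has been requested 3 times and is 99 seconds old.  Then
         * index = (3 * 3 / (99 + 1)) * 100 = 9, so the item goes to group 9,
         * whereas the plain ratio request_count / age would give only
         * (3 / (99 + 1)) * 100 = 3, crowding such items even closer to the
         * head of the array.
         */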
        if (lfu_item->parent_data.last_request_time.tv_sec !=
                lfu_item->parent_data.creation_time.tv_sec) {
                index = ((double)lfu_item->parent_data.request_count *
                        (double)lfu_item->parent_data.request_count /
                        (lfu_item->parent_data.last_request_time.tv_sec -
                            lfu_item->parent_data.creation_time.tv_sec + 1)) *
                            CACHELIB_MAX_FREQUENCY;
                if (index >= CACHELIB_MAX_FREQUENCY)
                        index = CACHELIB_MAX_FREQUENCY - 1;
        } else
                index = CACHELIB_MAX_FREQUENCY - 1;

        TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
                entries);
        lfu_item->frequency = index;
        TAILQ_INSERT_HEAD(&(lfu_policy->groups[index]), lfu_item, entries);

        TRACE_OUT(cache_lfu_policy_update_item);
}

static void
cache_lfu_policy_remove_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;

        TRACE_IN(cache_lfu_policy_remove_item);
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        lfu_item = (struct cache_lfu_policy_item_ *)item;

        TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
                entries);
        TRACE_OUT(cache_lfu_policy_remove_item);
}

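/*
 * The first LFU item is taken from the lowest-numbered non-empty frequency
 * group, so it is among the least frequently used elements and thus the
 * natural candidate for deletion.
 */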
static struct cache_policy_item_ *
cache_lfu_policy_get_first_item(struct cache_policy_ *policy)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;
        int i;

        TRACE_IN(cache_lfu_policy_get_first_item);
        lfu_item = NULL;
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
                if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
                        lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
                        break;
                }

        TRACE_OUT(cache_lfu_policy_get_first_item);
        return ((struct cache_policy_item_ *)lfu_item);
}

static struct cache_policy_item_ *
cache_lfu_policy_get_last_item(struct cache_policy_ *policy)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;
        int i;

        TRACE_IN(cache_lfu_policy_get_last_item);
        lfu_item = NULL;
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        for (i = CACHELIB_MAX_FREQUENCY - 1; i >= 0; --i)
                if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
                        lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
                                cache_lfu_policy_group_);
                        break;
                }

        TRACE_OUT(cache_lfu_policy_get_last_item);
        return ((struct cache_policy_item_ *)lfu_item);
}

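/*
 * Forward iteration first walks the current frequency group; when the group
 * is exhausted it continues with the next non-empty group of higher
 * frequency.  cache_lfu_policy_get_prev_item() below does the same in the
 * opposite direction.
 */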
static struct cache_policy_item_ *
cache_lfu_policy_get_next_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;
        int i;

        TRACE_IN(cache_lfu_policy_get_next_item);
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        lfu_item = TAILQ_NEXT((struct cache_lfu_policy_item_ *)item, entries);
        if (lfu_item == NULL) {
                for (i = ((struct cache_lfu_policy_item_ *)item)->frequency + 1;
                        i < CACHELIB_MAX_FREQUENCY; ++i) {
                        if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
                                lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
                                break;
                        }
                }
        }

        TRACE_OUT(cache_lfu_policy_get_next_item);
        return ((struct cache_policy_item_ *)lfu_item);
}

static struct cache_policy_item_ *
cache_lfu_policy_get_prev_item(struct cache_policy_ *policy,
        struct cache_policy_item_ *item)
{
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;
        int i;

        TRACE_IN(cache_lfu_policy_get_prev_item);
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        lfu_item = TAILQ_PREV((struct cache_lfu_policy_item_ *)item,
                cache_lfu_policy_group_, entries);
        if (lfu_item == NULL) {
                for (i = ((struct cache_lfu_policy_item_ *)item)->frequency - 1;
                        i >= 0; --i) {
                        if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
                                lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
                                        cache_lfu_policy_group_);
                                break;
                        }
                }
        }

        TRACE_OUT(cache_lfu_policy_get_prev_item);
        return ((struct cache_policy_item_ *)lfu_item);
}

/*
 * Initializes the cache_policy_ structure by filling it with the appropriate
 * function pointers.
 */
struct cache_policy_ *
init_cache_lfu_policy(void)
{
        int i;
        struct cache_lfu_policy_ *retval;

        TRACE_IN(init_cache_lfu_policy);
        retval = (struct cache_lfu_policy_ *)malloc(
                sizeof(struct cache_lfu_policy_));
        assert(retval != NULL);
        memset(retval, 0, sizeof(struct cache_lfu_policy_));

        retval->parent_data.create_item_func = cache_lfu_policy_create_item;
        retval->parent_data.destroy_item_func = cache_lfu_policy_destroy_item;

        retval->parent_data.add_item_func = cache_lfu_policy_add_item;
        retval->parent_data.update_item_func = cache_lfu_policy_update_item;
        retval->parent_data.remove_item_func = cache_lfu_policy_remove_item;

        retval->parent_data.get_first_item_func =
                cache_lfu_policy_get_first_item;
        retval->parent_data.get_last_item_func =
                cache_lfu_policy_get_last_item;
        retval->parent_data.get_next_item_func =
                cache_lfu_policy_get_next_item;
        retval->parent_data.get_prev_item_func =
                cache_lfu_policy_get_prev_item;

        for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
                TAILQ_INIT(&(retval->groups[i]));

        TRACE_OUT(init_cache_lfu_policy);
        return ((struct cache_policy_ *)retval);
}

void
destroy_cache_lfu_policy(struct cache_policy_ *policy)
{
        int i;
        struct cache_lfu_policy_ *lfu_policy;
        struct cache_lfu_policy_item_ *lfu_item;

        TRACE_IN(destroy_cache_lfu_policy);
        lfu_policy = (struct cache_lfu_policy_ *)policy;
        for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i) {
                while (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
                        lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
                        TAILQ_REMOVE(&(lfu_policy->groups[i]), lfu_item,
                                entries);
                        cache_lfu_policy_destroy_item(
                                (struct cache_policy_item_ *)lfu_item);
                }
        }
        free(policy);
        TRACE_OUT(destroy_cache_lfu_policy);
}