/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/kmem.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_KMEM
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * all allocations will be tracked when they are allocated and
 * freed.  When the SPL module is unloaded a list of all leaked
 * addresses and where they were allocated will be dumped to the
 * console.  Enabling this feature has a significant impact on
 * performance but it makes finding memory leaks straightforward.
 */
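/*
 * Illustrative example: with DEBUG_KMEM defined, an allocation made
 * through the shim which is never freed will be reported at unload:
 *
 *	void *buf = kmem_alloc(1024, KM_SLEEP);
 *	...
 *	kmem_free(buf, 1024);
 *
 * Omitting the kmem_free() causes the leaked address, its size, and
 * the allocating function and line number to be dumped to the console
 * when the SPL module is unloaded.
 */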
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
atomic64_t kmem_alloc_used;
unsigned long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used;
unsigned long vmem_alloc_max = 0;
int kmem_warning_flag = 1;
atomic64_t kmem_cache_alloc_failed;

spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
EXPORT_SYMBOL(kmem_warning_flag);

EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
#else
int kmem_set_warning(int flag) { return 0; }
#endif /* DEBUG_KMEM */
EXPORT_SYMBOL(kmem_set_warning);
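/*
 * Illustrative usage: the warning flag gates the allocation warnings
 * emitted by the kmem shim, so a caller which legitimately makes a
 * very large allocation may bracket it with (ptr and very_large_size
 * are placeholders):
 *
 *	kmem_set_warning(0);
 *	ptr = kmem_alloc(very_large_size, KM_SLEEP);
 *	kmem_set_warning(1);
 *
 * The new value of the flag is returned (always 0 when DEBUG_KMEM is
 * not defined).
 */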
/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly
 *    destroyed.
 *
 * 2) Virtual address backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32-bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Implement SPL proc interface to export full per cache stats.
 *
 * XXX: Implement work requests to keep an eye on each cache and
 *      shrink them via spl_slab_reclaim() when they are wasting lots
 *      of space.  Currently this process is driven by the reapers.
 *
 * XXX: Implement proper small cache object support by embedding
 *      the spl_kmem_slab_t, spl_kmem_obj_t's, and objects in the
 *      memory allocated for a particular slab.
 *
 * XXX: Implement a resizable used object hash.  Currently the hash
 *      is statically sized for thousands of objects but it should
 *      grow based on observed worst case slab depth.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
 *
 * XXX: Proper hardware cache alignment would be good too.
 */
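/*
 * Illustrative usage of the API implemented below (my_obj_t and the
 * my_ctor()/my_dtor() callbacks are hypothetical; everything else is
 * the interface defined in this file):
 *
 *	spl_kmem_cache_t *cache;
 *	void *obj;
 *
 *	cache = spl_kmem_cache_create("my_cache", sizeof(my_obj_t), 0,
 *	                              my_ctor, my_dtor, NULL,
 *	                              NULL, NULL, 0);
 *	obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 *	spl_kmem_cache_destroy(cache);
 *
 * Note that constructors and destructors run when slabs are created
 * and destroyed, not on every alloc/free, so their cost is amortized
 * over many object reuses.
 */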
/* Ensure the __kmem_cache_create/__kmem_cache_destroy macros are
 * removed here to prevent a recursive substitution; we want to call
 * the native Linux versions.
 */
#undef kmem_cache_create
#undef kmem_cache_destroy
#undef kmem_cache_alloc
#undef kmem_cache_free
static struct list_head spl_kmem_cache_list;   /* List of caches */
static struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static kmem_cache_t *spl_slab_cache;           /* Cache for slab structs */
static kmem_cache_t *spl_obj_cache;            /* Cache for obj structs */

static int spl_cache_flush(spl_kmem_cache_t *skc,
                           spl_kmem_magazine_t *skm, int flush);
#ifdef HAVE_SET_SHRINKER
static struct shrinker *spl_kmem_cache_shrinker;
#else
static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
                                           unsigned int gfp_mask);
static struct shrinker spl_kmem_cache_shrinker = {
	.shrink = spl_kmem_cache_generic_shrinker,
	.seeks = KMC_DEFAULT_SEEKS,
};
#endif
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags) {
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko, *n;
	int i;

	sks = kmem_cache_alloc(spl_slab_cache, flags);
	if (sks == NULL)
		RETURN(NULL);

	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);

	for (i = 0; i < sks->sks_objs; i++) {
		sko = kmem_cache_alloc(spl_obj_cache, flags);
		if (sko == NULL) {
out_alloc:
			/* Unable to fully construct slab, objects,
			 * and object data buffers; unwind everything.
			 */
			list_for_each_entry_safe(sko, n, &sks->sks_free_list,
						 sko_list) {
				ASSERT(sko->sko_magic == SKO_MAGIC);
				vmem_free(sko->sko_addr, skc->skc_obj_size);
				list_del(&sko->sko_list);
				kmem_cache_free(spl_obj_cache, sko);
			}

			kmem_cache_free(spl_slab_cache, sks);
			GOTO(out, sks = NULL);
		}

		sko->sko_addr = vmem_alloc(skc->skc_obj_size, flags);
		if (sko->sko_addr == NULL) {
			kmem_cache_free(spl_obj_cache, sko);
			GOTO(out_alloc, sks = NULL);
		}

		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		INIT_HLIST_NODE(&sko->sko_hlist);
		list_add(&sko->sko_list, &sks->sks_free_list);
	}
out:
	RETURN(sks);
}
/* Removes slab from complete or partial list, so it must
 * be called with the 'skc->skc_lock' held.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks) {
	spl_kmem_cache_t *skc;
	spl_kmem_obj_t *sko, *n;
	int i = 0;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);
	skc = sks->sks_cache;
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;

	ASSERT(spin_is_locked(&skc->skc_lock));

	list_for_each_entry_safe(sko, n, &sks->sks_free_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);

		/* Run destructors for the objects being freed */
		if (skc->skc_dtor)
			skc->skc_dtor(sko->sko_addr, skc->skc_private);

		vmem_free(sko->sko_addr, skc->skc_obj_size);
		list_del(&sko->sko_list);
		kmem_cache_free(spl_obj_cache, sko);
		i++;
	}

	ASSERT(sks->sks_objs == i);
	list_del(&sks->sks_list);
	kmem_cache_free(spl_slab_cache, sks);
}
static int
__spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks, *m;
	int rc = 0;

	ASSERT(spin_is_locked(&skc->skc_lock));

	/*
	 * Free empty slabs which have not been touched in skc_delay
	 * seconds.  This delay time is important to avoid thrashing.
	 * Empty slabs will be at the end of the skc_partial_list.
	 */
	list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list,
					 sks_list) {
		if (sks->sks_ref > 0)
			break;

		if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ)) {
			spl_slab_free(sks);
			rc++;
		}
	}

	RETURN(rc);
}

/* Returns number of slabs reclaimed */
static int
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	int rc;

	spin_lock(&skc->skc_lock);
	rc = __spl_slab_reclaim(skc);
	spin_unlock(&skc->skc_lock);

	RETURN(rc);
}
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	int size;

	/* Guesses for reasonable magazine sizes; they
	 * should really adapt based on observed usage.
	 * (The tier values below are representative defaults:
	 * larger objects get smaller per-CPU magazines.) */
	if (skc->skc_obj_size > (PAGE_SIZE * 256))
		size = 4;
	else if (skc->skc_obj_size > (PAGE_SIZE * 32))
		size = 16;
	else if (skc->skc_obj_size > (PAGE_SIZE))
		size = 64;
	else if (skc->skc_obj_size > (PAGE_SIZE / 4))
		size = 128;
	else if (skc->skc_obj_size > (PAGE_SIZE / 16))
		size = 256;
	else
		size = 512;

	RETURN(size);
}
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof(spl_kmem_magazine_t) +
	           sizeof(void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, node);
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_age = jiffies;
	}

	RETURN(skm);
}

static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i;

	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_online_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			RETURN(-ENOMEM);
		}
	}

	RETURN(0);
}
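/*
 * Worked example of the sizing above: per the tiers in
 * spl_magazine_size(), smaller objects get larger per-CPU magazines,
 * and the refill count is always half the magazine size rounded up,
 * e.g. skc_mag_size = 128 gives skc_mag_refill = (128 + 1) / 2 = 64.
 */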
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;

	for_each_online_cpu(i) {
		skm = skc->skc_mag[i];
		(void)spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}
}
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
                      spl_kmem_ctor_t ctor,
                      spl_kmem_dtor_t dtor,
                      spl_kmem_reclaim_t reclaim,
                      void *priv, void *vmp, int flags)
{
	spl_kmem_cache_t *skc;
	int i, rc, kmem_flags = KM_SLEEP;

	/* We may be called when there is a non-zero preempt_count or
	 * interrupts are disabled in which case we must not sleep.
	 */
	if (current_thread_info()->preempt_count || irqs_disabled())
		kmem_flags = KM_NOSLEEP;

	/* Allocate new cache memory and initialize. */
	skc = (spl_kmem_cache_t *)kmem_alloc(sizeof(*skc), kmem_flags);
	if (skc == NULL)
		RETURN(NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
	if (skc->skc_name == NULL) {
		kmem_free(skc, sizeof(*skc));
		RETURN(NULL);
	}
	strncpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_reclaim = reclaim;
	skc->skc_private = priv;

	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_chunk_size = 0; /* XXX: Needed only when implementing */
	skc->skc_slab_size = 0;  /* small slab object optimizations   */
	skc->skc_max_chunks = 0; /* which are not yet supported.      */
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;

	skc->skc_hash_bits = SPL_KMEM_CACHE_HASH_BITS;
	skc->skc_hash_size = SPL_KMEM_CACHE_HASH_SIZE;
	skc->skc_hash_elts = SPL_KMEM_CACHE_HASH_ELTS;
	skc->skc_hash = (struct hlist_head *)
	                kmem_alloc(skc->skc_hash_size, kmem_flags);
	if (skc->skc_hash == NULL) {
		kmem_free(skc->skc_name, skc->skc_name_size);
		kmem_free(skc, sizeof(*skc));
		RETURN(NULL);
	}

	for (i = 0; i < skc->skc_hash_elts; i++)
		INIT_HLIST_HEAD(&skc->skc_hash[i]);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	spin_lock_init(&skc->skc_lock);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_hash_depth = 0;
	skc->skc_hash_count = 0;

	rc = spl_magazine_create(skc);
	if (rc) {
		kmem_free(skc->skc_hash, skc->skc_hash_size);
		kmem_free(skc->skc_name, skc->skc_name_size);
		kmem_free(skc, sizeof(*skc));
		RETURN(NULL);
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	RETURN(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/* The caller must ensure there are no racing calls to
 * spl_kmem_cache_alloc() for this spl_kmem_cache_t.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks, *m;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	spl_magazine_destroy(skc);
	spin_lock(&skc->skc_lock);

	/* Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
	ASSERT(list_empty(&skc->skc_complete_list));
	ASSERTF(skc->skc_hash_count == 0, "skc->skc_hash_count=%d\n",
		skc->skc_hash_count);

	list_for_each_entry_safe(sks, m, &skc->skc_partial_list, sks_list)
		spl_slab_free(sks);

	kmem_free(skc->skc_hash, skc->skc_hash_size);
	kmem_free(skc->skc_name, skc->skc_name_size);
	spin_unlock(&skc->skc_lock);
	kmem_free(skc, sizeof(*skc));
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/* The kernel provided hash_ptr() function behaves exceptionally badly
 * when all the addresses are page aligned, which is likely the case
 * here.  To avoid this issue shift off the low order non-random bits.
 */
static unsigned long
spl_hash_ptr(void *ptr, unsigned int bits)
{
	return hash_long((unsigned long)ptr >> PAGE_SHIFT, bits);
}
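/*
 * For example, with 4K pages (PAGE_SHIFT == 12) every page-aligned
 * address has its low 12 bits equal to zero, so those bits contribute
 * nothing to the hash.  Shifting them off first means hash_long()
 * only sees the bits which actually vary between objects.
 */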
static spl_kmem_obj_t *
spl_hash_obj(spl_kmem_cache_t *skc, void *obj)
{
	struct hlist_node *node;
	spl_kmem_obj_t *sko = NULL;
	unsigned long key = spl_hash_ptr(obj, skc->skc_hash_bits);
	int i = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	hlist_for_each_entry(sko, node, &skc->skc_hash[key], sko_hlist) {
		if (unlikely((++i) > skc->skc_hash_depth))
			skc->skc_hash_depth = i;

		if (sko->sko_addr == obj) {
			ASSERT(sko->sko_magic == SKO_MAGIC);
			RETURN(sko);
		}
	}

	RETURN(NULL);
}
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;
	unsigned long key;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = list_entry((&sks->sks_free_list)->next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list and add to used hash */
	list_del_init(&sko->sko_list);
	key = spl_hash_ptr(sko->sko_addr, skc->skc_hash_bits);
	hlist_add_head(&sko->sko_hlist, &skc->skc_hash[key]);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;
	skc->skc_hash_count++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return sko->sko_addr;
}
/* No available objects, create a new slab.  Since this is an
 * expensive operation we do it without holding the spinlock
 * and only briefly acquire it when we link in the fully
 * allocated and constructed slab.
 */
static spl_kmem_slab_t *
spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (flags & __GFP_WAIT) {
//		flags |= __GFP_NOFAIL; /* XXX: Solaris assumes this */
		local_irq_enable();
		might_sleep();
	}

	sks = spl_slab_alloc(skc, flags);
	if (sks == NULL) {
		if (flags & __GFP_WAIT)
			local_irq_disable();

		RETURN(NULL);
	}

	/* Run all the constructors now that the slab is fully allocated */
	list_for_each_entry(sko, &sks->sks_free_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);

		if (skc->skc_ctor)
			skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
	}

	if (flags & __GFP_WAIT)
		local_irq_disable();

	/* Link the new empty slab in to the end of skc_partial_list */
	spin_lock(&skc->skc_lock);
	skc->skc_slab_total++;
	skc->skc_obj_total += sks->sks_objs;
	list_add_tail(&sks->sks_list, &skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);

	RETURN(sks);
}
static int
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int rc = 0, refill;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/* XXX: Check for refill bouncing by age perhaps */
	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);

	spin_lock(&skc->skc_lock);

	/* No slabs available, we must grow the cache */
	if (list_empty(&skc->skc_partial_list)) {
		spin_unlock(&skc->skc_lock);
		sks = spl_cache_grow(skc, flags);
		if (sks == NULL)
			GOTO(out, rc);

		/* Rescheduled to a different CPU, skm is not local */
		if (skm != skc->skc_mag[smp_processor_id()])
			GOTO(out, rc);

		/* Potentially rescheduled to the same CPU but
		 * allocations may have occurred from this CPU while
		 * we were sleeping so recalculate max refill. */
		refill = MIN(refill, skm->skm_size - skm->skm_avail);

		spin_lock(&skc->skc_lock);
	}

	/* Grab the next available slab */
	sks = list_entry((&skc->skc_partial_list)->next,
	                 spl_kmem_slab_t, sks_list);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref < sks->sks_objs);
	ASSERT(!list_empty(&sks->sks_free_list));

	/* Consume as many objects as needed to refill the requested
	 * cache.  We must also be careful not to overfill it. */
	while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
		ASSERT(skm->skm_avail < skm->skm_size);
		ASSERT(rc < skm->skm_size);
		skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks);
	}

	/* Move slab to skc_complete_list when full */
	if (sks->sks_ref == sks->sks_objs) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_complete_list);
	}

	spin_unlock(&skc->skc_lock);
out:
	/* Returns the number of entries added to cache */
	RETURN(rc);
}
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = spl_hash_obj(skc, obj);
	ASSERTF(sko, "Obj %p missing from in-use hash (%d) for cache %s\n",
		obj, skc->skc_hash_count, skc->skc_name);

	sks = sko->sko_slab;
	ASSERTF(sks, "Obj %p/%p linked to invalid slab for cache %s\n",
		obj, sko, skc->skc_name);

	ASSERT(sks->sks_cache == skc);
	hlist_del_init(&sko->sko_hlist);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;
	skc->skc_hash_count--;

	/* Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order: fuller at the head, emptier at the tail. */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/* Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation. */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}
static int
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	int i, count = MIN(flush, skm->skm_avail);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	spin_lock(&skc->skc_lock);

	for (i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

//	__spl_slab_reclaim(skc);
	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	        sizeof(void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);

	RETURN(count);
}
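/*
 * Worked example of the flush above: with skm->skm_avail == 5 and
 * flush == 3, count is 3, so skm_objs[0..2] (the oldest entries) are
 * returned to their slabs under skc_lock, skm_objs[3..4] are shifted
 * down to skm_objs[0..1], and skm_avail drops to 2.
 */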
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	unsigned long irq_flags;
	void *obj = NULL;
	int id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(flags & KM_SLEEP); /* XXX: KM_NOSLEEP not yet supported */
	local_irq_save(irq_flags);

restart:
	/* Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since it may have changed while
	 * we were growing the cache. */
	id = smp_processor_id();
	ASSERTF(id < NR_CPUS, "cache=%p smp_processor_id=%d\n", skc, id);
	skm = skc->skc_mag[smp_processor_id()];
	ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
		skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
		skm->skm_size, skm->skm_refill, skm->skm_avail);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
		skm->skm_age = jiffies;
	} else {
		/* Per-CPU cache empty, directly allocate from
		 * the slab and refill the per-CPU cache. */
		(void)spl_cache_refill(skc, skm, flags);
		GOTO(restart, obj = NULL);
	}

	local_irq_restore(irq_flags);

	/* Pre-emptively migrate object to CPU L1 cache */
	prefetchw(obj);

	RETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	local_irq_save(flags);

	/* Safe to update per-cpu structure without lock, but since
	 * no remote memory allocation tracking is being performed
	 * it is entirely possible to allocate an object from one
	 * CPU cache and return it to another. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/* Per-CPU cache full, flush it to make space */
	if (unlikely(skm->skm_avail >= skm->skm_size))
		(void)spl_cache_flush(skc, skm, skm->skm_refill);

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
static int
spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
	spl_kmem_cache_t *skc;

	/* Under Linux a shrinker is not tightly coupled with a slab
	 * cache.  In fact Linux always systematically tries calling all
	 * registered shrinker callbacks until its target reclamation
	 * level is reached.  Because of this we only register one
	 * shrinker function in the shim layer for all slab caches, and
	 * we always attempt to shrink all caches when this generic
	 * shrinker is called.
	 */
	down_read(&spl_kmem_cache_sem);

	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list)
		spl_kmem_cache_reap_now(skc);

	up_read(&spl_kmem_cache_sem);

	/* XXX: Under Linux we should return the remaining number of
	 * entries in the cache; we should do this as well.
	 */
	return 0;
}
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_reclaim)
		skc->skc_reclaim(skc->skc_private);

	/* Ensure per-CPU caches which are idle are gradually flushed */
	for_each_online_cpu(i) {
		skm = skc->skc_mag[i];

		if (time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
			(void)spl_cache_flush(skc, skm, skm->skm_refill);
	}

	spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

void
spl_kmem_reap(void)
{
	spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_init(void)
{
	int i, rc = 0;

	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);

	spl_slab_cache = NULL;
	spl_obj_cache = NULL;

	spl_slab_cache = __kmem_cache_create("spl_slab_cache",
					     sizeof(spl_kmem_slab_t),
					     0, 0, NULL, NULL);
	if (spl_slab_cache == NULL)
		GOTO(out_cache, rc = -ENOMEM);

	spl_obj_cache = __kmem_cache_create("spl_obj_cache",
					    sizeof(spl_kmem_obj_t),
					    0, 0, NULL, NULL);
	if (spl_obj_cache == NULL)
		GOTO(out_cache, rc = -ENOMEM);

#ifdef HAVE_SET_SHRINKER
	spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
					       spl_kmem_cache_generic_shrinker);
	if (spl_kmem_cache_shrinker == NULL)
		GOTO(out_cache, rc = -ENOMEM);
#else
	register_shrinker(&spl_kmem_cache_shrinker);
#endif

#ifdef DEBUG_KMEM
	atomic64_set(&kmem_alloc_used, 0);
	atomic64_set(&vmem_alloc_used, 0);
	atomic64_set(&kmem_cache_alloc_failed, 0);

	spin_lock_init(&kmem_lock);
	INIT_LIST_HEAD(&kmem_list);

	for (i = 0; i < KMEM_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	spin_lock_init(&vmem_lock);
	INIT_LIST_HEAD(&vmem_list);

	for (i = 0; i < VMEM_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&vmem_table[i]);
#endif /* DEBUG_KMEM */

	RETURN(rc);

out_cache:
	if (spl_obj_cache)
		(void)kmem_cache_destroy(spl_obj_cache);

	if (spl_slab_cache)
		(void)kmem_cache_destroy(spl_slab_cache);

	RETURN(rc);
}
#ifdef DEBUG_KMEM
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/* Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer. */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/* Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii. */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
			*((uint8_t *)kd->kd_addr),
			*((uint8_t *)kd->kd_addr + 2),
			*((uint8_t *)kd->kd_addr + 4),
			*((uint8_t *)kd->kd_addr + 6),
			*((uint8_t *)kd->kd_addr + 8),
			*((uint8_t *)kd->kd_addr + 10),
			*((uint8_t *)kd->kd_addr + 12),
			*((uint8_t *)kd->kd_addr + 14));
	}

	return str;
}
#endif /* DEBUG_KMEM */
void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	/* Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time. */
	if (atomic64_read(&kmem_alloc_used) != 0)
		CWARN("kmem leaked %ld/%ld bytes\n",
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);

	spin_lock_irqsave(&kmem_lock, flags);
	if (!list_empty(&kmem_list))
		CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
		       "address", "size", "data", "func", "line");

	list_for_each_entry(kd, &kmem_list, kd_list)
		CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
		       kd->kd_addr, kd->kd_size,
		       spl_sprintf_addr(kd, str, 17, 8),
		       kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(&kmem_lock, flags);

	if (atomic64_read(&vmem_alloc_used) != 0)
		CWARN("vmem leaked %ld/%ld bytes\n",
		      atomic64_read(&vmem_alloc_used), vmem_alloc_max);

	spin_lock_irqsave(&vmem_lock, flags);
	if (!list_empty(&vmem_list))
		CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
		       "address", "size", "data", "func", "line");

	list_for_each_entry(kd, &vmem_list, kd_list)
		CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
		       kd->kd_addr, kd->kd_size,
		       spl_sprintf_addr(kd, str, 17, 8),
		       kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(&vmem_lock, flags);
#endif /* DEBUG_KMEM */
#ifdef HAVE_SET_SHRINKER
	remove_shrinker(spl_kmem_cache_shrinker);
#else
	unregister_shrinker(&spl_kmem_cache_shrinker);
#endif

	(void)kmem_cache_destroy(spl_obj_cache);
	(void)kmem_cache_destroy(spl_slab_cache);
}