From 7e2da7786ec089d1b9f9010677dc8e8a65dc01a1 Mon Sep 17 00:00:00 2001
From: loli10K
Date: Tue, 14 Jan 2020 18:09:59 +0100
Subject: [PATCH] KMC_KVMEM disrupts kv_alloc() memory alignment expectations
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

On kernels with KASAN enabled the following failure can be observed as
soon as the zfs module is loaded:

  VERIFY(IS_P2ALIGNED(ptr, PAGE_SIZE)) failed
  PANIC at spl-kmem-cache.c:228:kv_alloc()

The problem is that kmalloc() has never guaranteed aligned allocations;
this requirement resulted in zfsonlinux/spl@8b45dda, which removed all
kmalloc() usage in kv_alloc().

Until a GFP_ALIGNED flag (or equivalent functionality) is provided by
the kernel, this commit partially reverts 66955885 and 6d948c35 to
prevent k(v)malloc() allocations in kv_alloc().

Reviewed-by: Kjeld Schouten
Reviewed-by: Michael Niewöhner
Reviewed-by: Brian Behlendorf
Signed-off-by: loli10K
Closes #9813
---
 module/os/linux/spl/spl-kmem-cache.c | 22 ++--------------------
 1 file changed, 2 insertions(+), 20 deletions(-)

diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 4526257185d..7dd8e85436b 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -202,26 +202,8 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 	if (skc->skc_flags & KMC_KMEM) {
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
-	} else if (skc->skc_flags & KMC_KVMEM) {
-		ptr = spl_kvmalloc(size, lflags);
 	} else {
-		/*
-		 * GFP_KERNEL allocations can safely use kvmalloc which may
-		 * improve performance by avoiding a) high latency caused by
-		 * vmalloc's on-access allocation, b) performance loss due to
-		 * MMU memory address mapping and c) vmalloc locking overhead.
-		 * This has the side-effect that the slab statistics will
-		 * incorrectly report this as a vmem allocation, but that is
-		 * purely cosmetic.
-		 *
-		 * For non-GFP_KERNEL allocations we stick to __vmalloc.
-		 */
-		if ((lflags & GFP_KERNEL) == GFP_KERNEL) {
-			ptr = spl_kvmalloc(size, lflags);
-		} else {
-			ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
-			    PAGE_KERNEL);
-		}
+		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
 	}
 
 	/* Resulting allocated memory will be page aligned */
@@ -249,7 +231,7 @@ kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
 		ASSERT(ISP2(size));
 		free_pages((unsigned long)ptr, get_order(size));
 	} else {
-		spl_kmem_free_impl(ptr, size);
+		vfree(ptr);
 	}
 }
 
--
2.45.2
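
Editor's note (not part of the patch): a minimal sketch of the alignment
contract this change restores. Both __get_free_pages() and __vmalloc()
return page-aligned memory, whereas kvmalloc() may fall back to kmalloc(),
which offers no such guarantee; KASAN's redzones in particular can push
kmalloc() results off page boundaries and trip the VERIFY shown above.
The function name demo_kv_alloc is hypothetical, the VERIFY and
IS_P2ALIGNED macros are assumed to come from the SPL headers, and the
three-argument __vmalloc() matches the pre-5.8 kernels this patch targets.

	#include <sys/debug.h>		/* VERIFY (SPL) */
	#include <sys/sysmacros.h>	/* IS_P2ALIGNED (SPL) */
	#include <linux/gfp.h>
	#include <linux/vmalloc.h>

	static void *
	demo_kv_alloc(int size, gfp_t lflags)
	{
		/*
		 * __vmalloc() builds the allocation from whole pages
		 * mapped into the vmalloc area, so the returned pointer
		 * always sits on a page boundary. kvmalloc() could
		 * instead hand back a kmalloc() chunk at an arbitrary
		 * (e.g. cacheline) alignment.
		 */
		void *ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
		    PAGE_KERNEL);

		/* The same invariant kv_alloc() enforces. */
		VERIFY(IS_P2ALIGNED(ptr, PAGE_SIZE));

		return (ptr);
	}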