summary | refs | log | tree | commit | diff
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
author	Christoph Hellwig <hch@lst.de>	2023-01-21 08:10:44 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:31 -0800
commit	01e2e8394a527644de5192f92f64e1c883a3e493 (patch)
tree	46419da5624eceb83c9b5c789706f0fbd9adca4b /mm/vmalloc.c
parent	f41f036b804d0d920f9b6fd3fca9489dd7afd358 (diff)
download	linux-next-01e2e8394a527644de5192f92f64e1c883a3e493.tar.gz
mm: remove __vfree_deferred
Fold __vfree_deferred into vfree_atomic, and call vfree_atomic early on from vfree if called from interrupt context so that the extra low-level helper can be avoided. Link: https://lkml.kernel.org/r/20230121071051.1143058-4-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com> Reviewed-by: David Hildenbrand <david@redhat.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	43
1 file changed, 17 insertions(+), 26 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 67fc9d7e4024..cfd796570e61 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2754,20 +2754,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
kfree(area);
}
-static inline void __vfree_deferred(const void *addr)
-{
- /*
- * Use raw_cpu_ptr() because this can be called from preemptible
- * context. Preemption is absolutely fine here, because the llist_add()
- * implementation is lockless, so it works even if we are adding to
- * another cpu's list. schedule_work() should be fine with this too.
- */
- struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
-
- if (llist_add((struct llist_node *)addr, &p->list))
- schedule_work(&p->wq);
-}
-
/**
* vfree_atomic - release memory allocated by vmalloc()
* @addr: memory base address
@@ -2777,13 +2763,19 @@ static inline void __vfree_deferred(const void *addr)
*/
void vfree_atomic(const void *addr)
{
- BUG_ON(in_nmi());
+ struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+ BUG_ON(in_nmi());
kmemleak_free(addr);
- if (!addr)
- return;
- __vfree_deferred(addr);
+ /*
+ * Use raw_cpu_ptr() because this can be called from preemptible
+ * context. Preemption is absolutely fine here, because the llist_add()
+ * implementation is lockless, so it works even if we are adding to
+ * another cpu's list. schedule_work() should be fine with this too.
+ */
+ if (addr && llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
}
/**
@@ -2805,17 +2797,16 @@ void vfree_atomic(const void *addr)
*/
void vfree(const void *addr)
{
- BUG_ON(in_nmi());
+ if (unlikely(in_interrupt())) {
+ vfree_atomic(addr);
+ return;
+ }
+ BUG_ON(in_nmi());
kmemleak_free(addr);
+ might_sleep();
- might_sleep_if(!in_interrupt());
-
- if (!addr)
- return;
- if (unlikely(in_interrupt()))
- __vfree_deferred(addr);
- else
+ if (addr)
__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);