author     Kent Overstreet <kent.overstreet@linux.dev>  2023-05-11 01:04:24 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-05-11 01:04:24 -0400
commit     aa45828c9d760f0f1e7b30977772c429db7c106a (patch)
tree       a1630c7093db72549b5ffc7554ba9924b7766e3d
parent     3faac79dca23d8f87a1c79774953f3dd90491448 (diff)
Slim down alloc_hooks
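
alloc_hooks() now uses typeof() on the wrapped allocation call to infer
the result type, so callers no longer have to pass the result type and
error value explicitly. A typical call site (taken from the slab.h hunk
below) goes from

	#define kmalloc(_size, _flags) \
		alloc_hooks(_kmalloc(_size, _flags), void*, NULL)

to

	#define kmalloc(_size, _flags) alloc_hooks(_kmalloc(_size, _flags))
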
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  include/linux/alloc_tag.h  |  5
-rw-r--r--  include/linux/gfp.h        | 62
-rw-r--r--  include/linux/mempool.h    |  6
-rw-r--r--  include/linux/pagemap.h    |  2
-rw-r--r--  include/linux/percpu.h     | 12
-rw-r--r--  include/linux/slab.h       | 54
-rw-r--r--  mm/compaction.c            |  3
7 files changed, 67 insertions, 77 deletions
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 2c3f4f3a8c93..be51f3b35e9e 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -147,9 +147,9 @@ static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
#endif
-#define alloc_hooks(_do_alloc, _res_type, _err) \
+#define alloc_hooks(_do_alloc) \
({ \
- _res_type _res; \
+ typeof(_do_alloc) _res; \
DEFINE_ALLOC_TAG(_alloc_tag, _old); \
\
_res = _do_alloc; \
@@ -157,5 +157,4 @@ static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
_res; \
})
-
#endif /* _LINUX_ALLOC_TAG_H */
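
Note: the slimmed-down macro leans on two GNU C extensions the kernel
already uses pervasively, statement expressions and typeof(). Below is a
minimal standalone sketch of the same pattern (userspace, illustrative
only; DEFINE_ALLOC_TAG and the tag save/restore are stubbed out as
hypothetical no-ops, so this is not the kernel implementation):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stub for the kernel's allocation-tag bookkeeping (hypothetical). */
	#define DEFINE_ALLOC_TAG(_tag, _old)	do { } while (0)

	/*
	 * typeof(_do_alloc) yields the result type of the wrapped call,
	 * so no type or error-value arguments are needed.
	 */
	#define alloc_hooks(_do_alloc)				\
	({							\
		typeof(_do_alloc) _res;				\
		DEFINE_ALLOC_TAG(_alloc_tag, _old);		\
								\
		_res = _do_alloc;				\
		_res;						\
	})

	int main(void)
	{
		/* Pointer-returning allocator: _res is void *. */
		void *p = alloc_hooks(malloc(64));
		/* Integer-valued expression: _res is long. */
		long v = alloc_hooks(42L);

		printf("%p %ld\n", p, v);
		free(p);
		return 0;
	}
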
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0cb4a515109a..d1586b90bb90 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -179,14 +179,12 @@ static inline void arch_alloc_page(struct page *page, int order) { }
struct page *_alloc_pages2(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
#define __alloc_pages(_gfp, _order, _preferred_nid, _nodemask) \
- alloc_hooks(_alloc_pages2(_gfp, _order, _preferred_nid, \
- _nodemask), struct page *, NULL)
+ alloc_hooks(_alloc_pages2(_gfp, _order, _preferred_nid, _nodemask))
struct folio *_folio_alloc2(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
#define __folio_alloc(_gfp, _order, _preferred_nid, _nodemask) \
- alloc_hooks(_folio_alloc2(_gfp, _order, _preferred_nid, \
- _nodemask), struct folio *, NULL)
+ alloc_hooks(_folio_alloc2(_gfp, _order, _preferred_nid, _nodemask))
unsigned long _alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
@@ -194,18 +192,14 @@ unsigned long _alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct page **page_array);
#define __alloc_pages_bulk(_gfp, _preferred_nid, _nodemask, _nr_pages, \
_page_list, _page_array) \
- alloc_hooks(_alloc_pages_bulk(_gfp, _preferred_nid, \
- _nodemask, _nr_pages, \
- _page_list, _page_array), \
- unsigned long, 0)
+ alloc_hooks(_alloc_pages_bulk(_gfp, _preferred_nid, _nodemask, \
+ _nr_pages, _page_list, _page_array))
unsigned long _alloc_pages_bulk_array_mempolicy(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);
#define alloc_pages_bulk_array_mempolicy(_gfp, _nr_pages, _page_array) \
- alloc_hooks(_alloc_pages_bulk_array_mempolicy(_gfp, \
- _nr_pages, _page_array), \
- unsigned long, 0)
+ alloc_hooks(_alloc_pages_bulk_array_mempolicy(_gfp, _nr_pages, _page_array))
/* Bulk allocate order-0 pages */
#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \
@@ -224,8 +218,7 @@ _alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct
}
#define alloc_pages_bulk_array_node(_gfp, _nid, _nr_pages, _page_array) \
- alloc_hooks(_alloc_pages_bulk_array_node(_gfp, _nid, _nr_pages, _page_array), \
- unsigned long, 0)
+ alloc_hooks(_alloc_pages_bulk_array_node(_gfp, _nid, _nr_pages, _page_array))
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
@@ -255,8 +248,7 @@ _alloc_pages_node2(int nid, gfp_t gfp_mask, unsigned int order)
}
#define __alloc_pages_node(_nid, _gfp_mask, _order) \
- alloc_hooks(_alloc_pages_node2(_nid, _gfp_mask, _order), \
- struct page *, NULL)
+ alloc_hooks(_alloc_pages_node2(_nid, _gfp_mask, _order))
static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
@@ -282,8 +274,7 @@ static inline struct page *_alloc_pages_node(int nid, gfp_t gfp_mask,
}
#define alloc_pages_node(_nid, _gfp_mask, _order) \
- alloc_hooks(_alloc_pages_node(_nid, _gfp_mask, _order), \
- struct page *, NULL)
+ alloc_hooks(_alloc_pages_node(_nid, _gfp_mask, _order))
#ifdef CONFIG_NUMA
struct page *_alloc_pages(gfp_t gfp, unsigned int order);
@@ -304,12 +295,11 @@ static inline struct folio *_folio_alloc(gfp_t gfp, unsigned int order)
#endif
#define alloc_pages(_gfp, _order) \
- alloc_hooks(_alloc_pages(_gfp, _order), struct page *, NULL)
+ alloc_hooks(_alloc_pages(_gfp, _order))
#define folio_alloc(_gfp, _order) \
- alloc_hooks(_folio_alloc(_gfp, _order), struct folio *, NULL)
+ alloc_hooks(_folio_alloc(_gfp, _order))
#define vma_alloc_folio(_gfp, _order, _vma, _addr, _hugepage) \
- alloc_hooks(_vma_alloc_folio(_gfp, _order, _vma, _addr, \
- _hugepage), struct folio *, NULL)
+ alloc_hooks(_vma_alloc_folio(_gfp, _order, _vma, _addr, _hugepage))
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
static inline struct page *alloc_page_vma(gfp_t gfp,
@@ -322,25 +312,27 @@ static inline struct page *alloc_page_vma(gfp_t gfp,
extern unsigned long _get_free_pages(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(_gfp_mask, _order) \
- alloc_hooks(_get_free_pages(_gfp_mask, _order), unsigned long, 0)
+ alloc_hooks(_get_free_pages(_gfp_mask, _order))
+
extern unsigned long _get_zeroed_page(gfp_t gfp_mask);
-#define get_zeroed_page(_gfp_mask) \
- alloc_hooks(_get_zeroed_page(_gfp_mask), unsigned long, 0)
+#define get_zeroed_page(_gfp_mask) \
+ alloc_hooks(_get_zeroed_page(_gfp_mask))
void *_alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(_size, _gfp_mask) \
- alloc_hooks(_alloc_pages_exact(_size, _gfp_mask), void *, NULL)
+ alloc_hooks(_alloc_pages_exact(_size, _gfp_mask))
+
void free_pages_exact(void *virt, size_t size);
__meminit void *_alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
-#define alloc_pages_exact_nid(_nid, _size, _gfp_mask) \
- alloc_hooks(_alloc_pages_exact_nid(_nid, _size, _gfp_mask), void *, NULL)
+#define alloc_pages_exact_nid(_nid, _size, _gfp_mask) \
+ alloc_hooks(_alloc_pages_exact_nid(_nid, _size, _gfp_mask))
-#define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask), 0)
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask), 0)
-#define __get_dma_pages(gfp_mask, order) \
- __get_free_pages((gfp_mask) | GFP_DMA, (order))
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA, (order))
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
@@ -400,13 +392,13 @@ static inline bool pm_suspended_storage(void)
extern int _alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
#define alloc_contig_range(_start, _end, _migratetype, _gfp_mask) \
- alloc_hooks(_alloc_contig_range(_start, _end, _migratetype, \
- _gfp_mask), int, -ENOMEM)
+ alloc_hooks(_alloc_contig_range(_start, _end, _migratetype, _gfp_mask))
+
extern struct page *_alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#define alloc_contig_pages(_nr_pages, _gfp_mask, _nid, _nodemask) \
- alloc_hooks(_alloc_contig_pages(_nr_pages, _gfp_mask, _nid, \
- _nodemask), struct page *, NULL)
+ alloc_hooks(_alloc_contig_pages(_nr_pages, _gfp_mask, _nid, _nodemask))
+
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index aa6e886b01d7..383910f9f683 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -45,7 +45,7 @@ int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
int _mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
#define mempool_init(...) \
- alloc_hooks(_mempool_init(__VA_ARGS__), int, -ENOMEM)
+ alloc_hooks(_mempool_init(__VA_ARGS__))
extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
@@ -54,7 +54,7 @@ extern mempool_t *_mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int nid);
#define mempool_create_node(...) \
- alloc_hooks(_mempool_create_node(__VA_ARGS__), mempool_t *, NULL)
+ alloc_hooks(_mempool_create_node(__VA_ARGS__))
#define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \
mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \
@@ -65,7 +65,7 @@ extern void mempool_destroy(mempool_t *pool);
extern void *_mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
#define mempool_alloc(_pool, _gfp) \
- alloc_hooks(_mempool_alloc((_pool), (_gfp)), void *, NULL)
+ alloc_hooks(_mempool_alloc((_pool), (_gfp)))
extern void mempool_free(void *element, mempool_t *pool);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b2efafa001f8..8d423384062a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -476,7 +476,7 @@ static inline struct folio *_filemap_alloc_folio(gfp_t gfp, unsigned int order)
#endif
#define filemap_alloc_folio(_gfp, _order) \
- alloc_hooks(_filemap_alloc_folio(_gfp, _order), struct folio *, NULL)
+ alloc_hooks(_filemap_alloc_folio(_gfp, _order))
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 51ec257379af..b7f444df1c3e 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -128,12 +128,12 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *__pcpu_alloc(size_t size, size_t align, bool reserved,
gfp_t gfp) __alloc_size(1);
-#define __alloc_percpu_gfp(_size, _align, _gfp) alloc_hooks( \
- __pcpu_alloc(_size, _align, false, _gfp), void __percpu *, NULL)
-#define __alloc_percpu(_size, _align) alloc_hooks( \
- __pcpu_alloc(_size, _align, false, GFP_KERNEL), void __percpu *, NULL)
-#define __alloc_reserved_percpu(_size, _align) alloc_hooks( \
- __pcpu_alloc(_size, _align, true, GFP_KERNEL), void __percpu *, NULL)
+#define __alloc_percpu_gfp(_size, _align, _gfp) \
+ alloc_hooks(__pcpu_alloc(_size, _align, false, _gfp))
+#define __alloc_percpu(_size, _align) \
+ alloc_hooks(__pcpu_alloc(_size, _align, false, GFP_KERNEL))
+#define __alloc_reserved_percpu(_size, _align) \
+ alloc_hooks(__pcpu_alloc(_size, _align, true, GFP_KERNEL))
#define alloc_percpu_gfp(type, gfp) \
(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 43c922524081..594e96f8c128 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -215,7 +215,7 @@ int kmem_cache_shrink(struct kmem_cache *s);
*/
void * __must_check _krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
#define krealloc(_p, _size, _flags) \
- alloc_hooks(_krealloc(_p, _size, _flags), void*, NULL)
+ alloc_hooks(_krealloc(_p, _size, _flags))
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
@@ -470,12 +470,12 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz
*/
void *_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(_s, _flags) \
- alloc_hooks(_kmem_cache_alloc(_s, _flags), void*, NULL)
+ alloc_hooks(_kmem_cache_alloc(_s, _flags))
void *_kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(_s, _lru, _flags) \
- alloc_hooks(_kmem_cache_alloc_lru(_s, _lru, _flags), void*, NULL)
+ alloc_hooks(_kmem_cache_alloc_lru(_s, _lru, _flags))
void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -487,9 +487,10 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
* Note that interrupts must be enabled when calling these functions.
*/
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
+
int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(_s, _flags, _size, _p) \
- alloc_hooks(_kmem_cache_alloc_bulk(_s, _flags, _size, _p), int, 0)
+ alloc_hooks(_kmem_cache_alloc_bulk(_s, _flags, _size, _p))
static __always_inline void kfree_bulk(size_t size, void **p)
{
@@ -500,8 +501,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
__alloc_size(1);
void *_kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
-#define kmem_cache_alloc_node(_s, _flags, _node) \
- alloc_hooks(_kmem_cache_alloc_node(_s, _flags, _node), void*, NULL)
+#define kmem_cache_alloc_node(_s, _flags, _node) \
+ alloc_hooks(_kmem_cache_alloc_node(_s, _flags, _node))
void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
__assume_kmalloc_alignment __alloc_size(3);
@@ -509,21 +510,21 @@ void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
void *_kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_kmalloc_alignment
__alloc_size(4);
-#define kmalloc_trace(_s, _flags, _size) \
- alloc_hooks(_kmalloc_trace(_s, _flags, _size), void*, NULL)
+#define kmalloc_trace(_s, _flags, _size) \
+ alloc_hooks(_kmalloc_trace(_s, _flags, _size))
-#define kmalloc_node_trace(_s, _gfpflags, _node, _size) \
- alloc_hooks(_kmalloc_node_trace(_s, _gfpflags, _node, _size), void*, NULL)
+#define kmalloc_node_trace(_s, _gfpflags, _node, _size) \
+ alloc_hooks(_kmalloc_node_trace(_s, _gfpflags, _node, _size))
void *_kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
__alloc_size(1);
-#define kmalloc_large(_size, _flags) \
- alloc_hooks(_kmalloc_large(_size, _flags), void*, NULL)
+#define kmalloc_large(_size, _flags) \
+ alloc_hooks(_kmalloc_large(_size, _flags))
void *_kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
__alloc_size(1);
-#define kmalloc_large_node(_size, _flags, _node) \
- alloc_hooks(_kmalloc_large_node(_size, _flags, _node), void*, NULL)
+#define kmalloc_large_node(_size, _flags, _node) \
+ alloc_hooks(_kmalloc_large_node(_size, _flags, _node))
/**
* kmalloc - allocate kernel memory
@@ -594,7 +595,7 @@ static __always_inline __alloc_size(1) void *_kmalloc(size_t size, gfp_t flags)
}
return __kmalloc(size, flags);
}
-#define kmalloc(_size, _flags) alloc_hooks(_kmalloc(_size, _flags), void*, NULL)
+#define kmalloc(_size, _flags) alloc_hooks(_kmalloc(_size, _flags))
static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -612,7 +613,7 @@ static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t fl
return __kmalloc_node(size, flags, node);
}
#define kmalloc_node(_size, _flags, _node) \
- alloc_hooks(_kmalloc_node(_size, _flags, _node), void*, NULL)
+ alloc_hooks(_kmalloc_node(_size, _flags, _node))
/**
* kmalloc_array - allocate memory for an array.
@@ -630,8 +631,8 @@ static inline __alloc_size(1, 2) void *_kmalloc_array(size_t n, size_t size, gfp
return _kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
-#define kmalloc_array(_n, _size, _flags) \
- alloc_hooks(_kmalloc_array(_n, _size, _flags), void*, NULL)
+#define kmalloc_array(_n, _size, _flags) \
+ alloc_hooks(_kmalloc_array(_n, _size, _flags))
/**
* krealloc_array - reallocate memory for an array.
@@ -652,8 +653,8 @@ static inline __realloc_size(2, 3) void * __must_check _krealloc_array(void *p,
return _krealloc(p, bytes, flags);
}
-#define krealloc_array(_p, _n, _size, _flags) \
- alloc_hooks(_krealloc_array(_p, _n, _size, _flags), void*, NULL)
+#define krealloc_array(_p, _n, _size, _flags) \
+ alloc_hooks(_krealloc_array(_p, _n, _size, _flags))
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
@@ -661,14 +662,13 @@ static inline __realloc_size(2, 3) void * __must_check _krealloc_array(void *p,
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-#define kcalloc(_n, _size, _flags) \
+#define kcalloc(_n, _size, _flags) \
kmalloc_array(_n, _size, (_flags) | __GFP_ZERO)
void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
- alloc_hooks(__kmalloc_node_track_caller(size, flags, node, \
- _RET_IP_), void*, NULL)
+#define kmalloc_node_track_caller(size, flags, node) \
+ alloc_hooks(__kmalloc_node_track_caller(size, flags, node, _RET_IP_))
/*
* kmalloc_track_caller is a special version of kmalloc that records the
@@ -693,7 +693,7 @@ static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size
return __kmalloc_node(bytes, flags, node);
}
#define kmalloc_array_node(_n, _size, _flags, _node) \
- alloc_hooks(_kmalloc_array_node(_n, _size, _flags, _node), void*, NULL)
+ alloc_hooks(_kmalloc_array_node(_n, _size, _flags, _node))
#define kcalloc_node(_n, _size, _flags, _node) \
kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
@@ -714,7 +714,7 @@ static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size
extern void *_kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node(_size, _flags, _node) \
- alloc_hooks(_kvmalloc_node(_size, _flags, _node), void*, NULL)
+ alloc_hooks(_kvmalloc_node(_size, _flags, _node))
#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO)
@@ -734,7 +734,7 @@ extern void *_kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t fla
__realloc_size(3);
#define kvrealloc(_p, _oldsize, _newsize, _flags) \
- alloc_hooks(_kvrealloc(_p, _oldsize, _newsize, _flags), void*, NULL)
+ alloc_hooks(_kvrealloc(_p, _oldsize, _newsize, _flags))
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
diff --git a/mm/compaction.c b/mm/compaction.c
index 32707fb62495..4343fb3338b1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1707,8 +1707,7 @@ static struct page *_compaction_alloc(struct page *migratepage,
static struct page *compaction_alloc(struct page *migratepage,
unsigned long data)
{
- return alloc_hooks(_compaction_alloc(migratepage, data),
- struct page *, NULL);
+ return alloc_hooks(_compaction_alloc(migratepage, data));
}
/*