author		Kent Overstreet <kent.overstreet@linux.dev>	2023-05-11 16:02:25 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-05-11 16:02:25 -0400
commit		688ab34997e70eb263d622368ffb023eabc2e590 (patch)
tree		7550c56823eab910c9591849c7db8bd4bbd3234c
parent		a768c157ba14aeb89f49cebed452e2a43b5f512b (diff)
alloc_hooks: Bit more reformatting
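Collapse the remaining two-line alloc_hooks() wrapper macros onto single
lines. Every public allocation entry point follows the same pattern: the
implementation keeps its signature under a leading underscore, and the
old name becomes a variadic macro that routes the call through
alloc_hooks().

A minimal sketch of that pattern, assuming a simplified stand-in for
alloc_hooks() (the real definition lives elsewhere in this series and
does per-callsite accounting; only the shape of the expansion is shown):

	/*
	 * Simplified stand-in for alloc_hooks(): a statement expression
	 * that evaluates the wrapped allocation once and yields its
	 * result, leaving room to instrument the call site around it.
	 */
	#define alloc_hooks(_do_alloc)		\
	({					\
		typeof(_do_alloc) _res;		\
		_res = _do_alloc;		\
		_res;				\
	})

	void *_kmalloc(size_t size, gfp_t flags);
	#define kmalloc(...) alloc_hooks(_kmalloc(__VA_ARGS__))

Call sites are unchanged: kmalloc(64, GFP_KERNEL) expands to
alloc_hooks(_kmalloc(64, GFP_KERNEL)).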
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--	include/linux/gfp.h	| 48
-rw-r--r--	include/linux/slab.h	| 60
2 files changed, 36 insertions(+), 72 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2306a465f010..f704a1dc0b7a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -178,26 +178,22 @@ static inline void arch_alloc_page(struct page *page, int order) { }
struct page *_alloc_pages2(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
-#define __alloc_pages(...) \
- alloc_hooks(_alloc_pages2(__VA_ARGS__))
+#define __alloc_pages(...) alloc_hooks(_alloc_pages2(__VA_ARGS__))
struct folio *_folio_alloc2(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
-#define __folio_alloc(...) \
- alloc_hooks(_folio_alloc2(__VA_ARGS__))
+#define __folio_alloc(...) alloc_hooks(_folio_alloc2(__VA_ARGS__))
unsigned long _alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct list_head *page_list,
struct page **page_array);
-#define __alloc_pages_bulk(...) \
- alloc_hooks(_alloc_pages_bulk(__VA_ARGS__))
+#define __alloc_pages_bulk(...) alloc_hooks(_alloc_pages_bulk(__VA_ARGS__))
unsigned long _alloc_pages_bulk_array_mempolicy(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);
-#define alloc_pages_bulk_array_mempolicy(...) \
- alloc_hooks(_alloc_pages_bulk_array_mempolicy(__VA_ARGS__))
+#define alloc_pages_bulk_array_mempolicy(...) alloc_hooks(_alloc_pages_bulk_array_mempolicy(__VA_ARGS__))
/* Bulk allocate order-0 pages */
#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \
@@ -215,8 +211,7 @@ _alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct
return _alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}
-#define alloc_pages_bulk_array_node(...) \
- alloc_hooks(_alloc_pages_bulk_array_node(__VA_ARGS__))
+#define alloc_pages_bulk_array_node(...) alloc_hooks(_alloc_pages_bulk_array_node(__VA_ARGS__))
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
@@ -245,8 +240,7 @@ _alloc_pages_node2(int nid, gfp_t gfp_mask, unsigned int order)
return _alloc_pages2(gfp_mask, order, nid, NULL);
}
-#define __alloc_pages_node(...) \
- alloc_hooks(_alloc_pages_node2(__VA_ARGS__))
+#define __alloc_pages_node(...) alloc_hooks(_alloc_pages_node2(__VA_ARGS__))
static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
@@ -271,8 +265,7 @@ static inline struct page *_alloc_pages_node(int nid, gfp_t gfp_mask,
return _alloc_pages_node2(nid, gfp_mask, order);
}
-#define alloc_pages_node(...) \
- alloc_hooks(_alloc_pages_node(__VA_ARGS__))
+#define alloc_pages_node(...) alloc_hooks(_alloc_pages_node(__VA_ARGS__))
#ifdef CONFIG_NUMA
struct page *_alloc_pages(gfp_t gfp, unsigned int order);
@@ -292,12 +285,9 @@ static inline struct folio *_folio_alloc(gfp_t gfp, unsigned int order)
_folio_alloc(gfp, order)
#endif
-#define alloc_pages(...) \
- alloc_hooks(_alloc_pages(__VA_ARGS__))
-#define folio_alloc(...) \
- alloc_hooks(_folio_alloc(__VA_ARGS__))
-#define vma_alloc_folio(...) \
- alloc_hooks(_vma_alloc_folio(__VA_ARGS__))
+#define alloc_pages(...) alloc_hooks(_alloc_pages(__VA_ARGS__))
+#define folio_alloc(...) alloc_hooks(_folio_alloc(__VA_ARGS__))
+#define vma_alloc_folio(...) alloc_hooks(_vma_alloc_folio(__VA_ARGS__))
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
static inline struct page *alloc_page_vma(gfp_t gfp,
@@ -309,22 +299,18 @@ static inline struct page *alloc_page_vma(gfp_t gfp,
}
extern unsigned long _get_free_pages(gfp_t gfp_mask, unsigned int order);
-#define __get_free_pages(...) \
- alloc_hooks(_get_free_pages(__VA_ARGS__))
+#define __get_free_pages(...) alloc_hooks(_get_free_pages(__VA_ARGS__))
extern unsigned long _get_zeroed_page(gfp_t gfp_mask);
-#define get_zeroed_page(...) \
- alloc_hooks(_get_zeroed_page(__VA_ARGS__))
+#define get_zeroed_page(...) alloc_hooks(_get_zeroed_page(__VA_ARGS__))
void *_alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
-#define alloc_pages_exact(...) \
- alloc_hooks(_alloc_pages_exact(__VA_ARGS__))
+#define alloc_pages_exact(...) alloc_hooks(_alloc_pages_exact(__VA_ARGS__))
void free_pages_exact(void *virt, size_t size);
__meminit void *_alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
-#define alloc_pages_exact_nid(...) \
- alloc_hooks(_alloc_pages_exact_nid(__VA_ARGS__))
+#define alloc_pages_exact_nid(...) alloc_hooks(_alloc_pages_exact_nid(__VA_ARGS__))
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
@@ -389,13 +375,11 @@ static inline bool pm_suspended_storage(void)
/* The below functions must be run on a range from a single zone. */
extern int _alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
-#define alloc_contig_range(...) \
- alloc_hooks(_alloc_contig_range(__VA_ARGS__))
+#define alloc_contig_range(...) alloc_hooks(_alloc_contig_range(__VA_ARGS__))
extern struct page *_alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
-#define alloc_contig_pages(...) \
- alloc_hooks(_alloc_contig_pages(__VA_ARGS__))
+#define alloc_contig_pages(...) alloc_hooks(_alloc_contig_pages(__VA_ARGS__))
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 31213110c99a..bb48a6302e90 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -214,8 +214,7 @@ int kmem_cache_shrink(struct kmem_cache *s);
* Common kmalloc functions provided by all allocators
*/
void * __must_check _krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
-#define krealloc(...) \
- alloc_hooks(_krealloc(__VA_ARGS__))
+#define krealloc(...) alloc_hooks(_krealloc(__VA_ARGS__))
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
@@ -469,13 +468,11 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz
* Return: pointer to the new object or %NULL in case of error
*/
void *_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
-#define kmem_cache_alloc(...) \
- alloc_hooks(_kmem_cache_alloc(__VA_ARGS__))
+#define kmem_cache_alloc(...) alloc_hooks(_kmem_cache_alloc(__VA_ARGS__))
void *_kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
-#define kmem_cache_alloc_lru(...) \
- alloc_hooks(_kmem_cache_alloc_lru(__VA_ARGS__))
+#define kmem_cache_alloc_lru(...) alloc_hooks(_kmem_cache_alloc_lru(__VA_ARGS__))
void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -489,8 +486,7 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
-#define kmem_cache_alloc_bulk(...) \
- alloc_hooks(_kmem_cache_alloc_bulk(__VA_ARGS__))
+#define kmem_cache_alloc_bulk(...) alloc_hooks(_kmem_cache_alloc_bulk(__VA_ARGS__))
static __always_inline void kfree_bulk(size_t size, void **p)
{
@@ -501,8 +497,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
__alloc_size(1);
void *_kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
-#define kmem_cache_alloc_node(...) \
- alloc_hooks(_kmem_cache_alloc_node(__VA_ARGS__))
+#define kmem_cache_alloc_node(...) alloc_hooks(_kmem_cache_alloc_node(__VA_ARGS__))
void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
__assume_kmalloc_alignment __alloc_size(3);
@@ -510,21 +505,17 @@ void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
void *_kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_kmalloc_alignment
__alloc_size(4);
-#define kmalloc_trace(...) \
- alloc_hooks(_kmalloc_trace(__VA_ARGS__))
+#define kmalloc_trace(...) alloc_hooks(_kmalloc_trace(__VA_ARGS__))
-#define kmalloc_node_trace(...) \
- alloc_hooks(_kmalloc_node_trace(__VA_ARGS__))
+#define kmalloc_node_trace(...) alloc_hooks(_kmalloc_node_trace(__VA_ARGS__))
void *_kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
__alloc_size(1);
-#define kmalloc_large(...) \
- alloc_hooks(_kmalloc_large(__VA_ARGS__))
+#define kmalloc_large(...) alloc_hooks(_kmalloc_large(__VA_ARGS__))
void *_kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
__alloc_size(1);
-#define kmalloc_large_node(...) \
- alloc_hooks(_kmalloc_large_node(__VA_ARGS__))
+#define kmalloc_large_node(...) alloc_hooks(_kmalloc_large_node(__VA_ARGS__))
/**
* kmalloc - allocate kernel memory
@@ -595,7 +586,7 @@ static __always_inline __alloc_size(1) void *_kmalloc(size_t size, gfp_t flags)
}
return __kmalloc(size, flags);
}
-#define kmalloc(...) alloc_hooks(_kmalloc(__VA_ARGS__))
+#define kmalloc(...) alloc_hooks(_kmalloc(__VA_ARGS__))
static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -612,8 +603,7 @@ static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t fl
}
return __kmalloc_node(size, flags, node);
}
-#define kmalloc_node(...) \
- alloc_hooks(_kmalloc_node(__VA_ARGS__))
+#define kmalloc_node(...) alloc_hooks(_kmalloc_node(__VA_ARGS__))
/**
* kmalloc_array - allocate memory for an array.
@@ -631,8 +621,7 @@ static inline __alloc_size(1, 2) void *_kmalloc_array(size_t n, size_t size, gfp
return _kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
-#define kmalloc_array(...) \
- alloc_hooks(_kmalloc_array(__VA_ARGS__))
+#define kmalloc_array(...) alloc_hooks(_kmalloc_array(__VA_ARGS__))
/**
* krealloc_array - reallocate memory for an array.
@@ -653,8 +642,7 @@ static inline __realloc_size(2, 3) void * __must_check _krealloc_array(void *p,
return _krealloc(p, bytes, flags);
}
-#define krealloc_array(...) \
- alloc_hooks(_krealloc_array(__VA_ARGS__))
+#define krealloc_array(...) alloc_hooks(_krealloc_array(__VA_ARGS__))
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
@@ -662,8 +650,7 @@ static inline __realloc_size(2, 3) void * __must_check _krealloc_array(void *p,
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-#define kcalloc(_n, _size, _flags) \
- kmalloc_array(_n, _size, (_flags) | __GFP_ZERO)
+#define kcalloc(_n, _size, _flags) kmalloc_array(_n, _size, (_flags) | __GFP_ZERO)
void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
unsigned long caller) __alloc_size(1);
@@ -678,8 +665,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#define kmalloc_track_caller(size, flags) \
- kmalloc_node_track_caller(size, flags, NUMA_NO_NODE)
+#define kmalloc_track_caller(size, flags) kmalloc_node_track_caller(size, flags, NUMA_NO_NODE)
static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size, gfp_t flags,
int node)
@@ -692,17 +678,14 @@ static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size
return _kmalloc_node(bytes, flags, node);
return __kmalloc_node(bytes, flags, node);
}
-#define kmalloc_array_node(...) \
- alloc_hooks(_kmalloc_array_node(__VA_ARGS__))
+#define kmalloc_array_node(...) alloc_hooks(_kmalloc_array_node(__VA_ARGS__))
-#define kcalloc_node(_n, _size, _flags, _node) \
- kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
+#define kcalloc_node(_n, _size, _flags, _node) kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
/*
* Shortcuts
*/
-#define kmem_cache_zalloc(_k, _flags) \
- kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
+#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
/**
* kzalloc - allocate memory. The memory is set to zero.
@@ -713,8 +696,7 @@ static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
extern void *_kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
-#define kvmalloc_node(...) \
- alloc_hooks(_kvmalloc_node(__VA_ARGS__))
+#define kvmalloc_node(...) alloc_hooks(_kvmalloc_node(__VA_ARGS__))
#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO)
@@ -732,9 +714,7 @@ extern void *_kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
extern void *_kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__realloc_size(3);
-
-#define kvrealloc(...) \
- alloc_hooks(_kvrealloc(__VA_ARGS__))
+#define kvrealloc(...) alloc_hooks(_kvrealloc(__VA_ARGS__))
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
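
Because the public names are now function-like macros, only direct calls
get the hook wrapping; anything that needs a real symbol (a function
pointer, say) has to use the underscored implementation, bypassing the
hooks. A small illustration, assuming the declarations above:

	/* Unchanged call site, now routed through alloc_hooks(): */
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * A macro has no address, so a function pointer must take the
	 * underscored implementation directly:
	 */
	unsigned long (*alloc_fn)(gfp_t) = _get_zeroed_page;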