author	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 12:33:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 12:33:21 -0700
commit	c5c009e2503d4c027591c65b49a98f420cb4fa56 (patch)
tree	c7ae51ab798d8f0e348b84cc1369ed4c3567080a /mm/slub.c
parent	1bc191051dca28fa6d20fd1dc34a1903e7d4fb62 (diff)
parent	94fa31e99b57ce4a56e93815421566d483186cb4 (diff)
Merge tag 'slab-for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - A few non-trivial SLUB code cleanups, most notably a refactoring of
   deactivate_slab().

 - A bunch of trivial changes, such as removal of unused parameters,
   making stuff static, and employing helper functions.

* tag 'slab-for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm: slub: Delete useless parameter of alloc_slab_page()
  mm: slab: Delete unused SLAB_DEACTIVATED flag
  mm/slub: remove forced_order parameter in calculate_sizes
  mm/slub: refactor deactivate_slab()
  mm/slub: limit number of node partial slabs only in cache creation
  mm/slub: use helper macro __ATTR_XX_MODE for SLAB_ATTR(_RO)
  mm/slab_common: use helper function is_power_of_2()
  mm/slob: make kmem_cache_boot static
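For the slab_common cleanup in that list, is_power_of_2() is the generic helper from include/linux/log2.h; it is essentially the classic clear-lowest-set-bit test (quoted from memory as a reference, not part of this diff):

	static inline __attribute__((const))
	bool is_power_of_2(unsigned long n)
	{
		return (n != 0 && ((n & (n - 1)) == 0));
	}

n & (n - 1) clears the lowest set bit, so the result is zero exactly when n had a single bit set.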
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	130
1 file changed, 52 insertions(+), 78 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 07cdd999c3fe..74d92aa4a3a2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1788,8 +1788,8 @@ static void *setup_object(struct kmem_cache *s, struct slab *slab,
/*
* Slab allocation and freeing
*/
-static inline struct slab *alloc_slab_page(struct kmem_cache *s,
- gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct slab *alloc_slab_page(gfp_t flags, int node,
+ struct kmem_cache_order_objects oo)
{
struct folio *folio;
struct slab *slab;
@@ -1941,7 +1941,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
- slab = alloc_slab_page(s, alloc_gfp, node, oo);
+ slab = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!slab)) {
oo = s->min;
alloc_gfp = flags;
@@ -1949,7 +1949,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
- slab = alloc_slab_page(s, alloc_gfp, node, oo);
+ slab = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!slab))
goto out;
stat(s, ORDER_FALLBACK);
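This hunk sits inside a common two-attempt pattern: allocate_slab() first tries the preferred higher order with reclaim and __GFP_NOFAIL masked out (so a large-order miss fails fast and cheaply), then retries at the cache's minimum order with the caller's original flags. A self-contained sketch of the pattern, where alloc_with_fallback() and the two order parameters are hypothetical stand-ins for the SLUB internals:

	#include <linux/gfp.h>

	/* Hypothetical stand-in for the alloc_slab_page() calls above;
	 * pref_order/min_order mirror oo_order(s->oo) and oo_order(s->min). */
	static struct page *alloc_with_fallback(gfp_t flags, int node,
						unsigned int pref_order,
						unsigned int min_order)
	{
		gfp_t alloc_gfp = flags;
		struct page *page;

		/* Optimistic attempt: don't reclaim or loop for a big
		 * contiguous run, and never dip into memory reserves. */
		if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && pref_order > min_order)
			alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) &
				    ~(__GFP_RECLAIM | __GFP_NOFAIL);

		page = alloc_pages_node(node, alloc_gfp, pref_order);
		if (page)
			return page;

		/* Fragmentation fallback: smallest usable order, original flags. */
		return alloc_pages_node(node, flags, min_order);
	}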
@@ -2348,10 +2348,10 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
void *freelist)
{
- enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
+ enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
struct kmem_cache_node *n = get_node(s, slab_nid(slab));
- int lock = 0, free_delta = 0;
- enum slab_modes l = M_NONE, m = M_NONE;
+ int free_delta = 0;
+ enum slab_modes mode = M_NONE;
void *nextfree, *freelist_iter, *freelist_tail;
int tail = DEACTIVATE_TO_HEAD;
unsigned long flags = 0;
@@ -2393,14 +2393,10 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
* Ensure that the slab is unfrozen while the list presence
* reflects the actual number of objects during unfreeze.
*
- * We setup the list membership and then perform a cmpxchg
- * with the count. If there is a mismatch then the slab
- * is not unfrozen but the slab is on the wrong list.
- *
- * Then we restart the process which may have to remove
- * the slab from the list that we just put it on again
- * because the number of objects in the slab may have
- * changed.
+ * We first perform the cmpxchg holding the lock, and insert the
+ * slab into a list only when it succeeds. If there is a mismatch
+ * then the slab is not unfrozen and the number of objects in the
+ * slab may have changed. In that case we release the lock and
+ * retry the cmpxchg again.
*/
redo:
@@ -2419,61 +2415,52 @@ redo:
new.frozen = 0;
- if (!new.inuse && n->nr_partial >= s->min_partial)
- m = M_FREE;
- else if (new.freelist) {
- m = M_PARTIAL;
- if (!lock) {
- lock = 1;
- /*
- * Taking the spinlock removes the possibility that
- * acquire_slab() will see a slab that is frozen
- */
- spin_lock_irqsave(&n->list_lock, flags);
- }
+ if (!new.inuse && n->nr_partial >= s->min_partial) {
+ mode = M_FREE;
+ } else if (new.freelist) {
+ mode = M_PARTIAL;
+ /*
+ * Taking the spinlock removes the possibility that
+ * acquire_slab() will see a slab that is frozen
+ */
+ spin_lock_irqsave(&n->list_lock, flags);
+ } else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
+ mode = M_FULL;
+ /*
+ * This also ensures that the scanning of full
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+ spin_lock_irqsave(&n->list_lock, flags);
} else {
- m = M_FULL;
- if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
- lock = 1;
- /*
- * This also ensures that the scanning of full
- * slabs from diagnostic functions will not see
- * any frozen slabs.
- */
- spin_lock_irqsave(&n->list_lock, flags);
- }
+ mode = M_FULL_NOLIST;
}
- if (l != m) {
- if (l == M_PARTIAL)
- remove_partial(n, slab);
- else if (l == M_FULL)
- remove_full(s, n, slab);
-
- if (m == M_PARTIAL)
- add_partial(n, slab, tail);
- else if (m == M_FULL)
- add_full(s, n, slab);
- }
- l = m;
if (!cmpxchg_double_slab(s, slab,
old.freelist, old.counters,
new.freelist, new.counters,
- "unfreezing slab"))
+ "unfreezing slab")) {
+ if (mode == M_PARTIAL || mode == M_FULL)
+ spin_unlock_irqrestore(&n->list_lock, flags);
goto redo;
+ }
- if (lock)
- spin_unlock_irqrestore(&n->list_lock, flags);
- if (m == M_PARTIAL)
+ if (mode == M_PARTIAL) {
+ add_partial(n, slab, tail);
+ spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, tail);
- else if (m == M_FULL)
- stat(s, DEACTIVATE_FULL);
- else if (m == M_FREE) {
+ } else if (mode == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
discard_slab(s, slab);
stat(s, FREE_SLAB);
+ } else if (mode == M_FULL) {
+ add_full(s, n, slab);
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ stat(s, DEACTIVATE_FULL);
+ } else if (mode == M_FULL_NOLIST) {
+ stat(s, DEACTIVATE_FULL);
}
}
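Because the diff interleaves the old and new logic, the shape of the refactored function is easier to see flattened out. A condensed, hypothetical sketch of the new control flow, with the counter setup and freelist splicing elided:

	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
	enum slab_modes mode = M_NONE;

	redo:
		/* ... build old/new counters and the merged freelist ... */

		if (!new.inuse && n->nr_partial >= s->min_partial) {
			mode = M_FREE;		/* empty: discard, no list, no lock */
		} else if (new.freelist) {
			mode = M_PARTIAL;	/* has free objects: partial list */
			spin_lock_irqsave(&n->list_lock, flags);
		} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
			mode = M_FULL;		/* full + debug: tracked full list */
			spin_lock_irqsave(&n->list_lock, flags);
		} else {
			mode = M_FULL_NOLIST;	/* full, untracked: no lock needed */
		}

		if (!cmpxchg_double_slab(s, slab, old.freelist, old.counters,
					 new.freelist, new.counters,
					 "unfreezing slab")) {
			if (mode == M_PARTIAL || mode == M_FULL)
				spin_unlock_irqrestore(&n->list_lock, flags);
			goto redo;		/* raced: drop lock, reclassify */
		}

		/* Only after the cmpxchg wins does the slab go on a list. */

The key design change: the old code put the slab on a list before the cmpxchg and had to move it again whenever the cmpxchg lost a race, whereas the new code classifies first, commits with the cmpxchg, and inserts exactly once. That is what lets the lock flag and the remove_partial()/remove_full() rollback paths disappear.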
@@ -4014,15 +4001,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
return 1;
}
-static void set_min_partial(struct kmem_cache *s, unsigned long min)
-{
- if (min < MIN_PARTIAL)
- min = MIN_PARTIAL;
- else if (min > MAX_PARTIAL)
- min = MAX_PARTIAL;
- s->min_partial = min;
-}
-
static void set_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -4060,7 +4038,7 @@ static void set_cpu_partial(struct kmem_cache *s)
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
*/
-static int calculate_sizes(struct kmem_cache *s, int forced_order)
+static int calculate_sizes(struct kmem_cache *s)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
@@ -4164,10 +4142,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
size = ALIGN(size, s->align);
s->size = size;
s->reciprocal_size = reciprocal_value(size);
- if (forced_order >= 0)
- order = forced_order;
- else
- order = calculate_order(size);
+ order = calculate_order(size);
if ((int)order < 0)
return 0;
@@ -4203,7 +4178,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
s->random = get_random_long();
#endif
- if (!calculate_sizes(s, -1))
+ if (!calculate_sizes(s))
goto error;
if (disable_higher_order_debug) {
/*
@@ -4213,7 +4188,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
if (get_order(s->size) > get_order(s->object_size)) {
s->flags &= ~DEBUG_METADATA_FLAGS;
s->offset = 0;
- if (!calculate_sizes(s, -1))
+ if (!calculate_sizes(s))
goto error;
}
}
@@ -4229,7 +4204,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
* The larger the object size is, the more slabs we want on the partial
* list to avoid pounding the page allocator excessively.
*/
- set_min_partial(s, ilog2(s->size) / 2);
+ s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+ s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
set_cpu_partial(s);
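With set_min_partial() gone, the clamp is open-coded at its only call site that needs it. Assuming the in-tree bounds MIN_PARTIAL == 5 and MAX_PARTIAL == 10, the computation works out as in this illustrative table:

	/*
	 * min_partial = clamp(ilog2(s->size) / 2, MIN_PARTIAL, MAX_PARTIAL)
	 *
	 *   s->size =      64  ->  ilog2 =  6  ->  6/2 =  3  ->  raised to  5
	 *   s->size =    4096  ->  ilog2 = 12  -> 12/2 =  6  ->  kept at    6
	 *   s->size = 4 << 20  ->  ilog2 = 22  -> 22/2 = 11  ->  capped at 10
	 */

Applying min_t() against MAX_PARTIAL first and then max_t() against MIN_PARTIAL is equivalent to a clamp, since MIN_PARTIAL <= MAX_PARTIAL.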
@@ -5358,12 +5334,10 @@ struct slab_attribute {
};
#define SLAB_ATTR_RO(_name) \
- static struct slab_attribute _name##_attr = \
- __ATTR(_name, 0400, _name##_show, NULL)
+ static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
#define SLAB_ATTR(_name) \
- static struct slab_attribute _name##_attr = \
- __ATTR(_name, 0600, _name##_show, _name##_store)
+ static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
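The __ATTR_RO_MODE() and __ATTR_RW_MODE() helpers come from include/linux/sysfs.h; from memory they expand roughly as below, so the conversion is behavior-preserving apart from gaining the VERIFY_OCTAL_PERMISSIONS() build-time check on the mode argument:

	#define __ATTR_RO_MODE(_name, _mode) {				\
		.attr	= { .name = __stringify(_name),			\
			    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },	\
		.show	= _name##_show,					\
	}

	#define __ATTR_RW_MODE(_name, _mode) {				\
		.attr	= { .name = __stringify(_name),			\
			    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },	\
		.show	= _name##_show,					\
		.store	= _name##_store,				\
	}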
@@ -5410,7 +5384,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
if (err)
return err;
- set_min_partial(s, min);
+ s->min_partial = min;
return length;
}
SLAB_ATTR(min_partial);