author	Vladimir Davydov <vdavydov@parallels.com>	2014-07-10 10:25:32 +1000
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2014-07-10 10:25:32 +1000
commit	85491dc14b3d60a1cd63e2adc55c24ea33899f72 (patch)
tree	cdf9615660883460a0ab510649e9af6d20b4228e /mm/slub.c
parent	137fad2291e00d16d7e448aac880947194550ac7 (diff)
slub: make dead memcg caches discard free slabs immediately
Since a dead memcg cache is destroyed only after the last slab allocated to it is freed, we must disable caching of empty slabs for such caches, otherwise they will hang around forever.

This patch makes SLUB discard dead memcg caches' slabs as soon as they become empty. To achieve that, it disables per cpu partial lists for dead caches (see put_cpu_partial) and forbids keeping empty slabs on per node partial lists by setting the cache's min_partial to 0 on kmem_cache_shrink, which is always called on memcg offline (see memcg_unregister_all_caches).

Thanks to Joonsoo Kim.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
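The toy userspace C model below illustrates the behaviour the message describes: once shrink has zeroed min_partial for a dead cache, a slab whose last object is freed is released immediately instead of being parked on a partial list. All names here (toy_cache, toy_slab, toy_cache_shrink, toy_free_object) are invented for illustration and are not SLUB APIs; the real changes are in the diff below.

/*
 * Toy model, not kernel code: a cache keeps up to min_partial empty
 * slabs; a dead cache has min_partial forced to 0 by shrink, so empty
 * slabs are freed as soon as their last object is released.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_slab {
	int inuse;              /* objects currently allocated from this slab */
	struct toy_slab *next;  /* link on the cache's partial list */
};

struct toy_cache {
	bool dead;                  /* set when the owning memcg goes offline */
	unsigned long min_partial;  /* empty slabs we are allowed to keep */
	unsigned long nr_partial;   /* empty slabs currently cached */
	struct toy_slab *partial;   /* singly linked partial list */
};

/* Mirrors the patch: on memcg offline, shrink sets min_partial to 0. */
static void toy_cache_shrink(struct toy_cache *s)
{
	if (s->dead)
		s->min_partial = 0;

	/* Drop cached empty slabs beyond min_partial. */
	while (s->nr_partial > s->min_partial && s->partial) {
		struct toy_slab *slab = s->partial;

		s->partial = slab->next;
		s->nr_partial--;
		free(slab);
	}
}

/* On free: an empty slab is either cached on the partial list or discarded. */
static void toy_free_object(struct toy_cache *s, struct toy_slab *slab)
{
	if (--slab->inuse > 0)
		return;

	if (s->nr_partial < s->min_partial) {
		/* Live cache: keep the empty slab around for reuse. */
		slab->next = s->partial;
		s->partial = slab;
		s->nr_partial++;
	} else {
		/* Dead cache (min_partial == 0): discard immediately. */
		free(slab);
	}
}

int main(void)
{
	struct toy_cache cache = { .dead = false, .min_partial = 5 };
	struct toy_slab *slab = calloc(1, sizeof(*slab));

	slab->inuse = 1;
	cache.dead = true;             /* memcg goes offline */
	toy_cache_shrink(&cache);      /* min_partial becomes 0 */
	toy_free_object(&cache, slab); /* last object freed -> slab discarded */

	printf("cached empty slabs: %lu\n", cache.nr_partial); /* prints 0 */
	return 0;
}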
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	11
1 file changed, 11 insertions, 0 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 15f98092dddd..904a5e919981 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2034,6 +2034,14 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
 								!= oldpage);
+
+	if (memcg_cache_dead(s)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+		local_irq_restore(flags);
+	}
 #endif
 }
 
@@ -3372,6 +3380,9 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
 	unsigned long flags;
 
+	if (memcg_cache_dead(s))
+		s->min_partial = 0;
+
 	if (!slabs_by_inuse) {
 		/*
 		 * Do not fail shrinking empty slabs if allocation of the