From 452647784b2fccfdeeb976f6f842c6719fb2daac Mon Sep 17 00:00:00 2001
From: Vladimir Davydov
Date: Tue, 26 Jul 2016 15:24:21 -0700
Subject: mm: memcontrol: cleanup kmem charge functions

- Hoist the memcg_kmem_enabled() check out to the callers.  This reduces
  the number of function definitions, making the code easier to follow.
  At the same time it doesn't result in code bloat, because all of these
  functions are used in only one or two places.

- Move the __GFP_ACCOUNT check to the callers as well, so that one doesn't
  have to dive deep into the memcg implementation to see which allocations
  are charged and which are not (a short usage sketch follows the patch
  below).

- Refresh comments.

Link: http://lkml.kernel.org/r/52882a28b542c1979fd9a033b4dc8637fc347399.1464079537.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Eric Dumazet
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 75 ++++++++++++++++++++++++++++++++++++++++++---------------
 mm/page_alloc.c |  9 ++++---
 mm/slab.h       | 16 +++++++++---
 3 files changed, 73 insertions(+), 27 deletions(-)
(limited to 'mm')

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index caea25a21c70..089ef3614155 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2273,20 +2273,30 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
         current->memcg_kmem_skip_account = 0;
 }

-/*
+static inline bool memcg_kmem_bypass(void)
+{
+        if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
+                return true;
+        return false;
+}
+
+/**
+ * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ *
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.
  *
- * If the cache does not exist yet, if we are the first user of it,
- * we either create it immediately, if possible, or create it asynchronously
- * in a workqueue.
- * In the latter case, we will let the current allocation go through with
- * the original cache.
+ * If the cache does not exist yet, if we are the first user of it, we
+ * create it asynchronously in a workqueue and let the current allocation
+ * go through with the original cache.
  *
- * Can't be called in interrupt context or from kernel threads.
- * This function needs to be called with rcu_read_lock() held.
+ * This function takes a reference to the cache it returns to assure it
+ * won't get destroyed while we are working with it. Once the caller is
+ * done with it, memcg_kmem_put_cache() must be called to release the
+ * reference.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
         struct mem_cgroup *memcg;
         struct kmem_cache *memcg_cachep;
@@ -2294,10 +2304,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)

         VM_BUG_ON(!is_root_cache(cachep));

-        if (cachep->flags & SLAB_ACCOUNT)
-                gfp |= __GFP_ACCOUNT;
-
-        if (!(gfp & __GFP_ACCOUNT))
+        if (memcg_kmem_bypass())
                 return cachep;

         if (current->memcg_kmem_skip_account)
@@ -2330,14 +2337,27 @@ out:
         return cachep;
 }

-void __memcg_kmem_put_cache(struct kmem_cache *cachep)
+/**
+ * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
+ * @cachep: the cache returned by memcg_kmem_get_cache
+ */
+void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
         if (!is_root_cache(cachep))
                 css_put(&cachep->memcg_params.memcg->css);
 }

-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                              struct mem_cgroup *memcg)
+/**
+ * memcg_kmem_charge_memcg: charge a kmem page
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ * @memcg: memory cgroup to charge
+ *
+ * Returns 0 on success, an error code on failure.
+ */
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+                            struct mem_cgroup *memcg)
 {
         unsigned int nr_pages = 1 << order;
         struct page_counter *counter;
@@ -2358,19 +2378,34 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
         return 0;
 }

-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+/**
+ * memcg_kmem_charge: charge a kmem page to the current memory cgroup
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ *
+ * Returns 0 on success, an error code on failure.
+ */
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 {
         struct mem_cgroup *memcg;
         int ret = 0;

+        if (memcg_kmem_bypass())
+                return 0;
+
         memcg = get_mem_cgroup_from_mm(current->mm);
         if (!mem_cgroup_is_root(memcg))
-                ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
         css_put(&memcg->css);
         return ret;
 }
-
-void __memcg_kmem_uncharge(struct page *page, int order)
+/**
+ * memcg_kmem_uncharge: uncharge a kmem page
+ * @page: page to uncharge
+ * @order: allocation order
+ */
+void memcg_kmem_uncharge(struct page *page, int order)
 {
         struct mem_cgroup *memcg = page->mem_cgroup;
         unsigned int nr_pages = 1 << order;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 13cf4c665321..de2491c42d4f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4009,7 +4009,8 @@ struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
         struct page *page;

         page = alloc_pages(gfp_mask, order);
-        if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+            page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
                 __free_pages(page, order);
                 page = NULL;
         }
@@ -4021,7 +4022,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
         struct page *page;

         page = alloc_pages_node(nid, gfp_mask, order);
-        if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+            page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
                 __free_pages(page, order);
                 page = NULL;
         }
@@ -4034,7 +4036,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  */
 void __free_kmem_pages(struct page *page, unsigned int order)
 {
-        memcg_kmem_uncharge(page, order);
+        if (memcg_kmem_enabled())
+                memcg_kmem_uncharge(page, order);
         __free_pages(page, order);
 }

diff --git a/mm/slab.h b/mm/slab.h
index 5fa8b8f20eb1..f33980ab0406 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -254,8 +254,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
         if (is_root_cache(s))
                 return 0;

-        ret = __memcg_kmem_charge_memcg(page, gfp, order,
-                                        s->memcg_params.memcg);
+        ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
         if (ret)
                 return ret;

@@ -269,6 +268,9 @@ static __always_inline int memcg_charge_slab(struct page *page,
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                 struct kmem_cache *s)
 {
+        if (!memcg_kmem_enabled())
+                return;
+
         memcg_kmem_update_page_stat(page,
                         (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                         MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
@@ -391,7 +393,11 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
         if (should_failslab(s, flags))
                 return NULL;

-        return memcg_kmem_get_cache(s, flags);
+        if (memcg_kmem_enabled() &&
+            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
+                return memcg_kmem_get_cache(s);
+
+        return s;
 }

 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
@@ -408,7 +414,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                                          s->flags, flags);
                 kasan_slab_alloc(s, object, flags);
         }
-        memcg_kmem_put_cache(s);
+
+        if (memcg_kmem_enabled())
+                memcg_kmem_put_cache(s);
 }

 #ifndef CONFIG_SLOB
-- 
cgit v1.2.3
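
As referenced in the changelog above, here is a minimal caller-side sketch
(not part of the patch) of the opt-in accounting model that the moved checks
make visible at the call sites: an allocation is charged to the current
task's memcg only if the caller passes __GFP_ACCOUNT, or allocates from a
cache created with SLAB_ACCOUNT.  kmem_cache_create(), kmalloc()/kfree() and
the two flags are the real kernel APIs of this era; struct foo, foo_cachep
and foo_init() are made-up names used purely for illustration.

    #include <linux/init.h>
    #include <linux/gfp.h>
    #include <linux/slab.h>

    struct foo {                            /* hypothetical object */
            unsigned long payload[16];
    };

    static struct kmem_cache *foo_cachep;

    static int __init foo_init(void)
    {
            void *buf;

            /*
             * Per-cache opt-in: SLAB_ACCOUNT makes every allocation from
             * this cache accounted; with this patch, slab_pre_alloc_hook()
             * sees the flag and picks the per-memcg cache via
             * memcg_kmem_get_cache().
             */
            foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
                                           SLAB_ACCOUNT, NULL);
            if (!foo_cachep)
                    return -ENOMEM;

            /*
             * Per-allocation opt-in: only allocations that pass
             * __GFP_ACCOUNT explicitly are charged; a plain GFP_KERNEL
             * kmalloc() is not.
             */
            buf = kmalloc(sizeof(struct foo), GFP_KERNEL | __GFP_ACCOUNT);
            kfree(buf);

            return 0;
    }

The page allocator side follows the same rule after this patch:
alloc_kmem_pages() only calls memcg_kmem_charge() when memcg_kmem_enabled()
is true and __GFP_ACCOUNT is set in gfp_mask, so unaccounted callers pay no
memcg overhead at all.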