author		Johannes Weiner <hannes@cmpxchg.org>	2020-06-03 16:01:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-03 20:09:47 -0700
commit		49e50d277ba2bb2c6e64632bc3193585674a2261
tree		34636d8995f966974a2752025f0a208664a3e416 /mm
parent		9f762dbe19b9f16d5df5603d4b93bad391c302bc
mm: memcontrol: prepare move_account for removal of private page type counters
When memcg uses the generic vmstat counters, it doesn't need to do
anything at charging and uncharging time. It does, however, need to
migrate counts when pages move to a different cgroup in move_account.

Prepare the move_account function for the arrival of NR_FILE_PAGES,
NR_ANON_MAPPED, NR_ANON_THPS etc. by having a branch for files and a
branch for anon, which can then be divided into sub-branches.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-8-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
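For orientation, here is a minimal sketch (not part of the patch) of the branch structure this change gives mem_cgroup_move_account(); the trailing anon branch and the counter names in its comment are assumptions based on the commit message, not code from this commit:

	if (!PageAnon(page)) {
		if (page_mapped(page)) {
			/* move NR_FILE_MAPPED from the old lruvec to the new one */
		}
		if (PageDirty(page)) {
			/* move NR_FILE_DIRTY if the mapping accounts dirty pages */
		}
	} else {
		/*
		 * Empty for now: later patches in the series are expected to
		 * add anon sub-branches here (e.g. NR_ANON_MAPPED, NR_ANON_THPS).
		 */
	}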
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ff45bef46ffe..f82ae3723760 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5434,7 +5434,6 @@ static int mem_cgroup_move_account(struct page *page,
 	struct pglist_data *pgdat;
 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
 	int ret;
-	bool anon;
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5452,25 +5451,27 @@ static int mem_cgroup_move_account(struct page *page,
 	if (page->mem_cgroup != from)
 		goto out_unlock;
 
-	anon = PageAnon(page);
-
 	pgdat = page_pgdat(page);
 	from_vec = mem_cgroup_lruvec(from, pgdat);
 	to_vec = mem_cgroup_lruvec(to, pgdat);
 
 	lock_page_memcg(page);
 
-	if (!anon && page_mapped(page)) {
-		__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
-		__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
-	}
+	if (!PageAnon(page)) {
+		if (page_mapped(page)) {
+			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
+		}
 
-	if (!anon && PageDirty(page)) {
-		struct address_space *mapping = page_mapping(page);
+		if (PageDirty(page)) {
+			struct address_space *mapping = page_mapping(page);
 
-		if (mapping_cap_account_dirty(mapping)) {
-			__mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
-			__mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
+			if (mapping_cap_account_dirty(mapping)) {
+				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
+						   -nr_pages);
+				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
+						   nr_pages);
+			}
 		}
 	}
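As a rough, hypothetical illustration of where the series is heading (assuming the NR_ANON_MAPPED conversion named in the commit message; this is not code from this or any specific later patch), the anon side could then mirror the file side inside the same branch structure:

	} else {
		if (page_mapped(page)) {
			/* hypothetical: move the generic anon counter instead
			 * of doing private accounting at charge/uncharge time */
			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
		}
	}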