diff options
author | Muchun Song <songmuchun@bytedance.com> | 2022-06-21 20:56:57 +0800 |
---|---|---|
committer | Liam R. Howlett <Liam.Howlett@oracle.com> | 2022-07-19 20:15:11 -0400 |
commit | 84e55c1067bc577f63dd8be9ff7a66e612663b42 (patch) | |
tree | 73a0730e35667c56333ff3255132263b609c83ab | |
parent | 0f6ab2c233c9169d7818c5596ef3d635535fe93c (diff) |
mm: lru: add VM_WARN_ON_ONCE_FOLIO to lru maintenance function
We need to make sure that the page is deleted from or added to the correct
lruvec list. So add a VM_WARN_ON_ONCE_FOLIO() to catch invalid users.
Then the VM_BUG_ON_PAGE() in move_pages_to_lru() could be removed since
add_page_to_lru_list() will check that.
Link: https://lkml.kernel.org/r/20220621125658.64935-11-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- | include/linux/mm_inline.h | 6 | ||||
-rw-r--r-- | mm/vmscan.c | 1 |
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 7b25b53c474a..6585198b19e2 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -99,6 +99,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); + VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); + update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); if (lru != LRU_UNEVICTABLE) @@ -116,6 +118,8 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); + VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); + update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); /* This is not expected to be used on LRU_UNEVICTABLE */ @@ -133,6 +137,8 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); + VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); + if (lru != LRU_UNEVICTABLE) list_del(&folio->lru); update_lru_size(lruvec, lru, folio_zonenum(folio), diff --git a/mm/vmscan.c b/mm/vmscan.c index 697656151431..51b1607c81e4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2361,7 +2361,6 @@ static unsigned int move_pages_to_lru(struct list_head *list) continue; } - VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); lruvec_add_folio(lruvec, folio); nr_pages = folio_nr_pages(folio); nr_moved += nr_pages; |