author	SeongJae Park <sj@kernel.org>	2024-04-29 15:44:50 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2024-05-07 10:37:02 -0700
commit	14f5be2a2d9bb7eb21807b6e62de73dd24082b73 (patch)
tree	1cab65741d808310decfb00de132ce68a99ba0e5 /mm
parent	ebd3f70c630a9a1aa1e27a63a7e0ffe97a0c1189 (diff)
mm/vmscan: remove ignore_references argument of reclaim_pages()
All reclaim_pages() callers are setting the 'ignore_references' parameter to 'true'.  In other words, the parameter is not really being used.  Remove the argument to make it simple.

Link: https://lkml.kernel.org/r/20240429224451.67081-4-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
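For illustration only (not part of the commit), a minimal before/after sketch of an affected call site, mirroring the callers changed in the diff below:

	/* before: every caller passed ignore_references == true */
	reclaim_pages(&folio_list, true);

	/* after: the flag is gone; reclaim_pages() passes true to
	 * reclaim_folio_list() internally
	 */
	reclaim_pages(&folio_list);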
Diffstat (limited to 'mm')
-rw-r--r--	mm/damon/paddr.c	2
-rw-r--r--	mm/internal.h	2
-rw-r--r--	mm/madvise.c	4
-rw-r--r--	mm/vmscan.c	6
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 974edef1740d..18797c1b419b 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -283,7 +283,7 @@ put_folio:
}
if (install_young_filter)
damos_destroy_filter(filter);
- applied = reclaim_pages(&folio_list, true);
+ applied = reclaim_pages(&folio_list);
cond_resched();
return applied * PAGE_SIZE;
}
diff --git a/mm/internal.h b/mm/internal.h
index 6803c7b17c1f..2adabe369403 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1052,7 +1052,7 @@ extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long);
extern void set_pageblock_order(void);
-unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references);
+unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
diff --git a/mm/madvise.c b/mm/madvise.c
index 56efea02e26c..c8ba3f3eb54d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -423,7 +423,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
huge_unlock:
spin_unlock(ptl);
if (pageout)
- reclaim_pages(&folio_list, true);
+ reclaim_pages(&folio_list);
return 0;
}
@@ -547,7 +547,7 @@ restart:
pte_unmap_unlock(start_pte, ptl);
}
if (pageout)
- reclaim_pages(&folio_list, true);
+ reclaim_pages(&folio_list);
cond_resched();
return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d194e7240df4..fe923a4a56bb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2133,7 +2133,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
return nr_reclaimed;
}
-unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references)
+unsigned long reclaim_pages(struct list_head *folio_list)
{
int nid;
unsigned int nr_reclaimed = 0;
@@ -2156,11 +2156,11 @@ unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references
}
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid),
- ignore_references);
+ true);
nid = folio_nid(lru_to_folio(folio_list));
} while (!list_empty(folio_list));
- nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), ignore_references);
+ nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), true);
memalloc_noreclaim_restore(noreclaim_flag);