author     Kent Overstreet <kent.overstreet@gmail.com>  2022-03-21 20:20:09 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>  2022-03-21 20:20:09 -0400
commit     1a9d73b5c0b949fd4b53935a43e7dc1e0d9d1127 (patch)
tree       cbe5bbae21ca9f9a4cf2f206d6bd665903ae0084
parent     205d75307a7c71f3807c8aa74405cd7267a40e4b (diff)
Shrinker improvements
After a memory allocation failure, don't rely on /proc/meminfo to figure out how much memory we should free - instead, unconditionally free 1/8th of each cache.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
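The idea: once an allocation has already failed, skip the /proc/meminfo heuristic entirely and ask every registered shrinker to drop a fixed 1/8th of whatever it currently caches. A minimal, self-contained C sketch of that fallback is below; demo_shrinker, demo_list and the printf driver are hypothetical stand-ins for struct shrinker, shrinker_list and scan_objects() in linux/shrinker.c, not code from this patch.

/*
 * Hedged sketch of the "free 1/8th of each cache" fallback.
 * demo_shrinker, demo_list and main() are hypothetical stand-ins
 * for the real shrinker structures in linux/shrinker.c.
 */
#include <stdio.h>

struct demo_shrinker {
	const char	*name;
	unsigned long	nr_cached;	/* objects currently held by this cache */
};

/* Pretend these are the registered shrinkers. */
static struct demo_shrinker demo_list[] = {
	{ "btree_cache",	4096 },
	{ "btree_key_cache",	1024 },
};

/* Called only after an allocation has already failed. */
static void demo_run_shrinkers_allocation_failed(void)
{
	for (size_t i = 0; i < sizeof(demo_list) / sizeof(demo_list[0]); i++) {
		struct demo_shrinker *s = &demo_list[i];
		unsigned long have = s->nr_cached;
		unsigned long nr_to_scan = have / 8;	/* unconditional 1/8th */

		s->nr_cached -= nr_to_scan;		/* stands in for scan_objects() */
		printf("%s: freed %lu of %lu objects\n",
		       s->name, nr_to_scan, have);
	}
}

int main(void)
{
	demo_run_shrinkers_allocation_failed();
	return 0;
}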
-rw-r--r--  include/linux/shrinker.h   2
-rw-r--r--  include/linux/slab.h       4
-rw-r--r--  include/linux/vmalloc.h    2
-rw-r--r--  linux/shrinker.c          27
4 files changed, 29 insertions, 6 deletions
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 626b768c..eba6cfdd 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -25,6 +25,6 @@ struct shrinker {
int register_shrinker(struct shrinker *);
void unregister_shrinker(struct shrinker *);
-void run_shrinkers(void);
+void run_shrinkers(gfp_t gfp_mask, bool);
#endif /* __TOOLS_LINUX_SHRINKER_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index bc99973f..557c0411 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,7 +20,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
void *p;
do {
- run_shrinkers();
+ run_shrinkers(flags, i != 0);
if (size) {
size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
@@ -83,7 +83,7 @@ static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
void *p;
do {
- run_shrinkers();
+ run_shrinkers(flags, i != 0);
p = aligned_alloc(PAGE_SIZE, size);
if (p && (flags & __GFP_ZERO))
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index ccb319eb..965e341d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -20,7 +20,7 @@ static inline void *__vmalloc(unsigned long size, gfp_t gfp_mask)
size = round_up(size, PAGE_SIZE);
do {
- run_shrinkers();
+ run_shrinkers(gfp_mask, i != 0);
p = aligned_alloc(PAGE_SIZE, size);
if (p && gfp_mask & __GFP_ZERO)
diff --git a/linux/shrinker.c b/linux/shrinker.c
index f6c979aa..876c1bae 100644
--- a/linux/shrinker.c
+++ b/linux/shrinker.c
@@ -65,7 +65,24 @@ static struct meminfo read_meminfo(void)
return ret;
}
-void run_shrinkers(void)
+static void run_shrinkers_allocation_failed(gfp_t gfp_mask)
+{
+ struct shrinker *shrinker;
+
+ mutex_lock(&shrinker_lock);
+ list_for_each_entry(shrinker, &shrinker_list, list) {
+ struct shrink_control sc = { .gfp_mask = gfp_mask, };
+
+ unsigned long have = shrinker->count_objects(shrinker, &sc);
+
+ sc.nr_to_scan = have / 8;
+
+ shrinker->scan_objects(shrinker, &sc);
+ }
+ mutex_unlock(&shrinker_lock);
+}
+
+void run_shrinkers(gfp_t gfp_mask, bool allocation_failed)
{
struct shrinker *shrinker;
struct meminfo info;
@@ -75,6 +92,11 @@ void run_shrinkers(void)
if (list_empty(&shrinker_list))
return;
+ if (allocation_failed) {
+ run_shrinkers_allocation_failed(gfp_mask);
+ return;
+ }
+
info = read_meminfo();
if (info.total && info.available) {
@@ -92,7 +114,8 @@ void run_shrinkers(void)
mutex_lock(&shrinker_lock);
list_for_each_entry(shrinker, &shrinker_list, list) {
struct shrink_control sc = {
- .nr_to_scan = want_shrink >> PAGE_SHIFT
+ .gfp_mask = gfp_mask,
+ .nr_to_scan = want_shrink >> PAGE_SHIFT
};
shrinker->scan_objects(shrinker, &sc);
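
For context, the allocation shims changed above retry in a loop and pass i != 0 as the new allocation_failed argument, so the meminfo-based path runs on the first attempt and the unconditional 1/8th path on every retry. A hedged reconstruction of that retry pattern follows; the retry bound, the malloc() stand-in and the helper name alloc_with_shrinkers are assumptions modelled on the kmalloc() shim in include/linux/slab.h, assuming this tree's gfp_t typedef and run_shrinkers() declaration are in scope.

#include <stdlib.h>
#include <linux/shrinker.h>	/* run_shrinkers(), assuming this tree's shim headers */

/* Hypothetical helper illustrating the retry pattern; not code from this patch. */
static inline void *alloc_with_shrinkers(size_t size, gfp_t flags)
{
	void *p = NULL;
	unsigned i;

	for (i = 0; i < 10; i++) {	/* retry bound is an assumption */
		/* First attempt: meminfo heuristic; retries: free 1/8th of each cache. */
		run_shrinkers(flags, i != 0);

		p = malloc(size);
		if (p)
			break;
	}

	return p;
}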