Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 967bc7ea239d..b19ebb3a666b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2231,9 +2231,21 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
+/**
+ * __shrink_zone - shrinks a given zone
+ *
+ * @zone: zone to shrink
+ * @sc: scan control with additional reclaim parameters
+ * @honor_memcg_guarantee: do not reclaim memcgs that are within their memory
+ *			   guarantee
+ *
+ * Returns the number of scanned memcgs.
+ */
+static unsigned __shrink_zone(struct zone *zone, struct scan_control *sc,
+			      bool honor_memcg_guarantee)
 {
 	unsigned long nr_reclaimed, nr_scanned;
+	unsigned nr_scanned_groups = 0;
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2250,7 +2262,20 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 		do {
 			struct lruvec *lruvec;
 
+			/* The memcg might be protected from reclaim */
+			if (honor_memcg_guarantee &&
+			    mem_cgroup_within_guarantee(memcg, root)) {
+				/*
+				 * It would be more efficient to skip the
+				 * whole memcg subtree here, but there is
+				 * no memcg iterator helper for that yet.
+				 */
+				memcg = mem_cgroup_iter(root, memcg, &reclaim);
+				continue;
+			}
+
 			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+			nr_scanned_groups++;
 			sc->swappiness = mem_cgroup_swappiness(memcg);
 			shrink_lruvec(lruvec, sc);
@@ -2279,6 +2304,27 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
 					 sc->nr_scanned - nr_scanned, sc));
+
+	return nr_scanned_groups;
+}
+
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
+{
+	bool honor_guarantee = true;
+
+	while (!__shrink_zone(zone, sc, honor_guarantee)) {
+		/*
+		 * The previous round of reclaim didn't find anything to scan
+		 * because either
+		 * a) the whole reclaimed hierarchy is within its guarantee,
+		 *    so we fall back to ignoring the guarantee because the
+		 *    only other option would be OOM, or
+		 * b) multiple reclaimers are racing, so the first round
+		 *    should simply be retried.
+		 */
+		if (mem_cgroup_all_within_guarantee(sc->target_mem_cgroup))
+			honor_guarantee = false;
+	}
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
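
For context: mem_cgroup_within_guarantee() and mem_cgroup_all_within_guarantee() are introduced by other patches in this series and are not part of the hunk above. The sketch below only illustrates the semantics this patch relies on; mem_cgroup_usage() and the low_limit field are hypothetical names used for illustration, not the series' actual API, and the NULL target_mem_cgroup case of global reclaim is left out.

/*
 * Illustrative sketch only, not the series' implementation: a memcg is
 * treated as protected when it, or any ancestor up to the reclaim root,
 * is still below its configured low limit.  mem_cgroup_usage() and
 * ->low_limit are hypothetical names.
 */
static bool mem_cgroup_within_guarantee(struct mem_cgroup *memcg,
					struct mem_cgroup *root)
{
	do {
		if (mem_cgroup_usage(memcg) < memcg->low_limit)
			return true;
		if (memcg == root)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)));

	return false;
}

/*
 * Illustrative sketch only: true when every memcg in the hierarchy under
 * @root is protected, i.e. the retry loop in shrink_zone() may drop the
 * guarantee rather than declare OOM.
 */
static bool mem_cgroup_all_within_guarantee(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
	     iter = mem_cgroup_iter(root, iter, NULL)) {
		if (!mem_cgroup_within_guarantee(iter, root)) {
			mem_cgroup_iter_break(root, iter);
			return false;
		}
	}

	return true;
}

Under these semantics, __shrink_zone() can only return 0 when every group was skipped as protected (case a above) or when racing reclaimers left it nothing to scan (case b), which is exactly the distinction the retry loop in shrink_zone() draws.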