Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--  mm/vmstat.c  48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0885a34197b7..8ce2620344b2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -319,6 +319,16 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long x;
long t;
+ /*
+ * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
+ * atomicity is provided by IRQs being disabled -- either explicitly
+ * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
+ * CPU migration, and a preempting task could corrupt the counter,
+ * so disable preemption explicitly.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -328,6 +338,9 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
EXPORT_SYMBOL(__mod_zone_page_state);
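The comment above spells out the hazard being closed: the update is a read-modify-write (__this_cpu_read() the per-CPU diff, adjust it, __this_cpu_write() it back), and if the updater is preempted between the read and the write, a second updater of the same per-CPU counter can have its change silently overwritten. The sketch below is only a userspace analogy, not kernel code: plain C with pthreads, where the thread race stands in for preemption on PREEMPT_RT and the mutex stands in for the preempt_disable()/preempt_enable() pair; all names, the file name and the iteration count are made up for the demo.

/*
 * Userspace analogy of the RMW hazard described in __mod_zone_page_state.
 * Build with: gcc -pthread rmw_demo.c -o rmw_demo   (file name is arbitrary)
 * The "racy" run deliberately contains a data race to show lost updates.
 */
#include <pthread.h>
#include <stdio.h>

#define ITERATIONS 1000000

static long counter;			/* stands in for the per-CPU diff */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int serialize;			/* 0: racy RMW, 1: serialized RMW */

static void *worker(void *arg)
{
	(void)arg;
	for (long i = 0; i < ITERATIONS; i++) {
		if (serialize)
			pthread_mutex_lock(&lock);
		long x = counter;	/* read ...                     */
		counter = x + 1;	/* ... modify-write, not atomic */
		if (serialize)
			pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static long run(int s)
{
	pthread_t a, b;

	counter = 0;
	serialize = s;
	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return counter;
}

int main(void)
{
	printf("racy RMW:       %ld (expected %d)\n", run(0), 2 * ITERATIONS);
	printf("serialized RMW: %ld (expected %d)\n", run(1), 2 * ITERATIONS);
	return 0;
}

The racy run typically falls well short of 2000000, while the serialized run is exact; keeping the per-CPU RMW section free of preemption is the property the new preempt_disable()/preempt_enable() pair preserves on PREEMPT_RT.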
@@ -350,6 +363,10 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
delta >>= PAGE_SHIFT;
}
+ /* See __mod_zone_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -359,6 +376,9 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
EXPORT_SYMBOL(__mod_node_page_state);
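The same conditional guard now brackets every per-CPU vmstat updater in this file. Purely to illustrate the repeated pattern, it could be centralized in a pair of inline helpers; the sketch below is hypothetical, and the helper names are invented here rather than taken from the patch.

/* Hypothetical wrappers -- names invented for illustration, not part of this patch. */
static inline void vmstat_preempt_disable(void)
{
	/* On !PREEMPT_RT the RMW is already serialized by disabled IRQs. */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
}

static inline void vmstat_preempt_enable(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}

Each updater would then call the wrappers around its __this_cpu_read()/__this_cpu_write() sequence, exactly as the open-coded conditionals in these hunks do.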
@@ -391,6 +411,10 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
+ /* See __mod_node_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
@@ -399,6 +423,9 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
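Aside from the new guard, the logic these hunks bracket is the existing threshold/overstep folding: each CPU counts in a small signed per-CPU diff, and once the diff exceeds pcp->stat_threshold the whole batch plus half a threshold (the overstep) is added to the zone- or node-wide counter and the per-CPU diff is reset to -overstep, so the next fold happens roughly a window and a half later. The standalone simulation below (plain C, not kernel code; the threshold value and all names are arbitrary choices for the sketch) makes that behaviour easy to watch.

/*
 * Standalone simulation of the fold-on-overflow logic in __inc_zone_state
 * for a single simulated CPU.  Threshold and names are made up for the demo.
 */
#include <stdio.h>

#define STAT_THRESHOLD 8	/* stands in for pcp->stat_threshold */

static long global_count;	/* stands in for the zone-wide counter */
static signed char pcp_diff;	/* stands in for pcp->vm_stat_diff[item] */

static void inc_state(void)
{
	signed char v = ++pcp_diff;
	signed char t = STAT_THRESHOLD;

	if (v > t) {
		signed char overstep = t >> 1;

		global_count += v + overstep;	/* fold the batch into the global count */
		pcp_diff = -overstep;		/* restart below zero */
	}
}

int main(void)
{
	for (int i = 1; i <= 40; i++) {
		inc_state();
		printf("inc %2d: pcp_diff=%3d global=%3ld (global+diff=%ld)\n",
		       i, pcp_diff, global_count, global_count + pcp_diff);
	}
	return 0;
}

At every step global_count + pcp_diff equals the true count, so a reader that only consults the global counter is off by at most roughly one threshold per CPU -- the usual vmstat accuracy trade-off that makes the cheap per-CPU fast path worthwhile.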
@@ -409,6 +436,10 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+ /* See __mod_node_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
@@ -417,6 +448,9 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -437,6 +471,10 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
+ /* See __mod_node_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
@@ -445,6 +483,9 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -455,6 +496,10 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+ /* See __mod_node_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
@@ -463,6 +508,9 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)