author	Frederic Weisbecker <frederic@kernel.org>	2021-10-19 02:08:15 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-05-12 12:25:45 +0200
commit	a22d66eb518f444bd66b38dbf8235d98e40f37e1 (patch)
tree	03a532f15b644e9414423aa5c2945db9b7f4cd36 /kernel
parent	40fb3812d99746f0322ada92fdc8d904a024e6de (diff)
rcu: Apply callbacks processing time limit only on softirq
commit a554ba288845fd3f6f12311fd76a51694233458a upstream.

Time limit only makes sense when callbacks are serviced in softirq mode
because:

_ In case we need to get back to the scheduler,
  cond_resched_tasks_rcu_qs() is called after each callback.

_ In case some other softirq vector needs the CPU, the call to
  local_bh_enable() before cond_resched_tasks_rcu_qs() takes care of
  them via a call to do_softirq().

Therefore, make sure the time limit only applies to softirq mode.

Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
[UR: backport to 5.10-stable]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
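To see the pattern outside of diff context, the following standalone C sketch imitates the post-patch logic: a deadline is armed only when running "in softirq", and the clock is sampled only once every 32 callbacks so the common path stays cheap. This is illustrative only; serving_softirq(), now_ns() and process_one_callback() are hypothetical stand-ins for the kernel's in_serving_softirq(), local_clock() and callback invocation, not kernel APIs, and the fixed 3 ms deadline only loosely mirrors the rcu_resched_ns clamp.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000LL

/* Hypothetical stand-in for local_clock(): monotonic nanoseconds. */
static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Hypothetical stand-ins for in_serving_softirq() and callback work. */
static bool serving_softirq(void) { return true; }
static void process_one_callback(long i) { (void)i; }

int main(void)
{
	long bl = 128;          /* stand-in for the batch limit bl */
	int64_t tlimit = 0;
	long count;

	/* As in the patch: arm the deadline only in softirq mode. */
	if (serving_softirq() && bl > 100)
		tlimit = now_ns() + 3 * NSEC_PER_MSEC;

	for (count = 1; count <= 1000000; count++) {
		process_one_callback(count);

		/* Sample the clock only once every 32 callbacks. */
		if (tlimit && !(count & 31) && now_ns() >= tlimit) {
			printf("time limit hit after %ld callbacks\n", count);
			break;
		}
	}
	return 0;
}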
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tree.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f340df6ebd86..b41009a283ca 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2456,7 +2456,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	div = READ_ONCE(rcu_divisor);
 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
 	bl = max(rdp->blimit, pending >> div);
-	if (unlikely(bl > 100)) {
+	if (in_serving_softirq() && unlikely(bl > 100)) {
 		long rrn = READ_ONCE(rcu_resched_ns);
 
 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
@@ -2494,6 +2494,18 @@ static void rcu_do_batch(struct rcu_data *rdp)
 			if (-rcl.len >= bl && (need_resched() ||
 			    (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
 				break;
+
+			/*
+			 * Make sure we don't spend too much time here and deprive other
+			 * softirq vectors of CPU cycles.
+			 */
+			if (unlikely(tlimit)) {
+				/* only call local_clock() every 32 callbacks */
+				if (likely((-rcl.len & 31) || local_clock() < tlimit))
+					continue;
+				/* Exceeded the time limit, so leave. */
+				break;
+			}
 		} else {
 			local_bh_enable();
 			lockdep_assert_irqs_enabled();
@@ -2501,18 +2513,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
 			lockdep_assert_irqs_enabled();
 			local_bh_disable();
 		}
-
-		/*
-		 * Make sure we don't spend too much time here and deprive other
-		 * softirq vectors of CPU cycles.
-		 */
-		if (unlikely(tlimit)) {
-			/* only call local_clock() every 32 callbacks */
-			if (likely((-rcl.len & 31) || local_clock() < tlimit))
-				continue;
-			/* Exceeded the time limit, so leave. */
-			break;
-		}
 	}
 
 	local_irq_save(flags);