author	Frederic Weisbecker <frederic@kernel.org>	2021-10-19 02:08:14 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-05-12 12:25:45 +0200
commit	40fb3812d99746f0322ada92fdc8d904a024e6de (patch)
tree	703b80dd0ba279622d4731aad0a8467505c0fd8f /kernel
parent	43dbc3edada66eaa9d406ea9647359e795288ea0 (diff)
rcu: Fix callbacks processing time limit retaining cond_resched()
commit 3e61e95e2d095e308616cba4ffb640f95a480e01 upstream.

The callbacks processing time limit makes sure we are not exceeding a
given amount of time executing the queue.

However its "continue" clause bypasses the cond_resched() call on rcuc
and NOCB kthreads, delaying it until we reach the limit, which can be
very long...

Make sure the scheduler has a higher priority than the time limit.

Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
[UR: backport to 5.10-stable + commit update]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
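To make the starvation concrete: the following userspace C toy mirrors the
loop shape the message describes. Every name in it (yield_to_scheduler(),
in_softirq(), old_batch(), new_batch()) is a hypothetical stand-in for
illustration only, none of it is kernel API. In the old shape, whenever a
time limit is armed, the "continue"/"break" pair exits the iteration before
the yield at the bottom of the loop is ever reached; in the new shape the
yield runs on every iteration, before the limit check.

/* toy.c: build with "cc -o toy toy.c" and run. */
#include <stdbool.h>
#include <stdio.h>

static int yields;
static void yield_to_scheduler(void) { yields++; } /* ~ cond_resched() */
static bool in_softirq(void) { return false; }     /* pretend: rcuc/NOCB kthread */

/* Old shape: the time-limit clause bypasses the yield below it. */
static int old_batch(int ncbs, int tlimit)
{
	yields = 0;
	for (int done = 1; done <= ncbs; done++) {
		if (tlimit) {
			if (done < tlimit)
				continue;	/* skips yield_to_scheduler() */
			break;			/* limit reached, leave */
		}
		if (!in_softirq())
			yield_to_scheduler();	/* never reached while tlimit is armed */
	}
	return yields;
}

/* New shape: yield first, then apply the time limit. */
static int new_batch(int ncbs, int tlimit)
{
	yields = 0;
	for (int done = 1; done <= ncbs; done++) {
		if (!in_softirq())
			yield_to_scheduler();	/* scheduler wins over the limit */
		if (tlimit && done >= tlimit)
			break;
	}
	return yields;
}

int main(void)
{
	printf("old: %d yields before the limit\n", old_batch(1000, 500)); /* prints 0 */
	printf("new: %d yields before the limit\n", new_batch(1000, 500)); /* prints 500 */
	return 0;
}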
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tree.c	28
1 file changed, 16 insertions, 12 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 844c35803739..f340df6ebd86 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2490,10 +2490,22 @@ static void rcu_do_batch(struct rcu_data *rdp)
 		 * Stop only if limit reached and CPU has something to do.
 		 * Note: The rcl structure counts down from zero.
 		 */
-		if (-rcl.len >= bl && !offloaded &&
-		    (need_resched() ||
-		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
-			break;
+		if (in_serving_softirq()) {
+			if (-rcl.len >= bl && (need_resched() ||
+			    (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+				break;
+		} else {
+			local_bh_enable();
+			lockdep_assert_irqs_enabled();
+			cond_resched_tasks_rcu_qs();
+			lockdep_assert_irqs_enabled();
+			local_bh_disable();
+		}
+
+		/*
+		 * Make sure we don't spend too much time here and deprive other
+		 * softirq vectors of CPU cycles.
+		 */
 		if (unlikely(tlimit)) {
 			/* only call local_clock() every 32 callbacks */
 			if (likely((-rcl.len & 31) || local_clock() < tlimit))
@@ -2501,14 +2513,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
 			/* Exceeded the time limit, so leave. */
 			break;
 		}
-		if (offloaded) {
-			WARN_ON_ONCE(in_serving_softirq());
-			local_bh_enable();
-			lockdep_assert_irqs_enabled();
-			cond_resched_tasks_rcu_qs();
-			lockdep_assert_irqs_enabled();
-			local_bh_disable();
-		}
 	}
 	local_irq_save(flags);
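A note on the resulting shape, paraphrasing the diff above with added
comments rather than introducing any new API: the rescheduling bracket that
used to sit in the "if (offloaded)" block at the bottom of the loop now runs
on every non-softirq iteration (covering rcuc and NOCB kthreads alike),
before the time-limit check. That is also why WARN_ON_ONCE(in_serving_softirq())
could be dropped: the else branch is only reached when in_serving_softirq()
is false. Annotated, the hoisted bracket reads:

local_bh_enable();		/* close the BH-off section; pending softirqs may now run */
lockdep_assert_irqs_enabled();	/* this path must never run with IRQs disabled */
cond_resched_tasks_rcu_qs();	/* yield if needed and report potential quiescent states */
lockdep_assert_irqs_enabled();
local_bh_disable();		/* back to BH-off for further callback invocation */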