author		Con Kolivas <kernel@kolivas.org>	2016-11-05 08:51:39 +1100
committer	Con Kolivas <kernel@kolivas.org>	2016-11-05 08:53:38 +1100
commit		945092c3a5133ddd9dff4da56619f850fee4cc41 (patch)
tree		49d89996a703b2d044bef1a604d7eb51f24c5cbb /kernel
parent		4c0bced79d7270f148506031a4d39f17cd9e03c6 (diff)
Kick out of the idle loop if there are softirqs pending, avoiding entering nohz idle, and service them on exit from schedule().
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/MuQSS.c	15
-rw-r--r--	kernel/sched/MuQSS.h	15
-rw-r--r--	kernel/sched/idle.c	10
-rw-r--r--	kernel/sched/sched.h	5
4 files changed, 39 insertions, 6 deletions
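
In outline, the patch works as sketched below. This is a simplified paraphrase of the cpu_idle_loop() hunks further down, not verbatim kernel code; need_resched(), tick_nohz_idle_enter()/tick_nohz_idle_exit() and schedule_preempt_disabled() are the real APIs at this call site.

	while (1) {
		/* If softirqs are pending, softirq_pending() sets
		 * TIF_NEED_RESCHED on the idle task, so the nohz
		 * enter/exit pair is skipped and control falls
		 * straight through to schedule(). */
		bool pending = softirq_pending(cpu);

		if (!pending)
			tick_nohz_idle_enter();

		while (!need_resched())
			;	/* arch idle / cpuidle sleep, elided */

		if (!pending)
			tick_nohz_idle_exit();	/* balance the enter */

		schedule_preempt_disabled();	/* softirqs are then
						 * serviced on exit from
						 * schedule(), see below */
	}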
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 57d1db9c7213..6d1d5a420bc4 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -752,6 +752,13 @@ static inline bool task_queued(struct task_struct *p)
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
static inline void resched_if_idle(struct rq *rq);
+/* Dodgy workaround till we figure out where the softirqs are going */
+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
+{
+ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
+ do_softirq_own_stack();
+}
+
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
@@ -808,9 +815,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#endif
rq_unlock(rq);
- /* Dodgy workaround till we figure out where the softirqs are going */
- if (unlikely(current == rq->idle && local_softirq_pending() && !in_interrupt()))
- do_softirq_own_stack();
+ do_pending_softirq(rq, current);
local_irq_enable();
}
@@ -3852,7 +3857,9 @@ static void __sched notrace __schedule(bool preempt)
context_switch(rq, prev, next); /* unlocks the rq */
} else {
check_siblings(rq);
- rq_unlock_irq(rq);
+ rq_unlock(rq);
+ do_pending_softirq(rq, next);
+ local_irq_enable();
}
}
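
The __schedule() hunk above reads most easily if one assumes, as in the mainline scheduler, that rq_unlock_irq(rq) is equivalent to rq_unlock(rq) followed by local_irq_enable(); the change then simply opens a window between those two steps:

	rq_unlock(rq);			/* drop the runqueue lock */
	do_pending_softirq(rq, next);	/* if next is the idle task,
					 * softirqs are pending and we
					 * are not in interrupt context,
					 * run them on the softirq stack
					 * while interrupts are still off */
	local_irq_enable();		/* then re-enable interrupts */

The same unlock/service/enable sequence already existed open-coded in finish_lock_switch(), which the earlier hunks refactor into do_pending_softirq(), so both exit paths from __schedule() now drain softirqs before the idle task can go back to sleep.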
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index f9510d739777..3565a7d8a5ee 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -1,5 +1,6 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
+#include <linux/interrupt.h>
#include <linux/skip_list.h>
#include <linux/stop_machine.h>
#include "cpuacct.h"
@@ -325,4 +326,18 @@ static inline void cpufreq_trigger(u64 time, unsigned long util)
#define arch_scale_freq_invariant() (false)
#endif
+/*
+ * This should only be called when current == rq->idle. Dodgy workaround for
+ * when softirqs are pending and we are in the idle loop. Setting current to
+ * resched will kick us out of the idle loop and the softirqs will be serviced
+ * on our next pass through schedule().
+ */
+static inline bool softirq_pending(int cpu)
+{
+ if (likely(!local_softirq_pending()))
+ return false;
+ set_tsk_need_resched(current);
+ return true;
+}
+
#endif /* MUQSS_SCHED_H */
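
Note how the two halves meet: softirq_pending() only flags the idle task for reschedule; nothing is serviced here. The flag makes need_resched() true, which kicks the CPU out of the idle loop, and on the following pass through __schedule() the idle task is typically picked again, taking the no-switch branch shown above where do_pending_softirq() runs the backlog. That is what the commit message means by servicing the softirqs on exit from schedule().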
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 060b76d854ec..51264e6b120e 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -208,6 +208,8 @@ static void cpu_idle_loop(void)
int cpu = smp_processor_id();
while (1) {
+ bool pending = false;
+
/*
* If the arch has a polling bit, we maintain an invariant:
*
@@ -219,7 +221,10 @@ static void cpu_idle_loop(void)
__current_set_polling();
quiet_vmstat();
- tick_nohz_idle_enter();
+ if (unlikely(softirq_pending(cpu)))
+ pending = true;
+ else
+ tick_nohz_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -259,7 +264,8 @@ static void cpu_idle_loop(void)
* not have had an IPI to fold the state for us.
*/
preempt_set_need_resched();
- tick_nohz_idle_exit();
+ if (!pending)
+ tick_nohz_idle_exit();
__current_clr_polling();
/*
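
The local 'pending' flag latches the skip decision for the whole loop iteration: tick_nohz_idle_exit() must only run when tick_nohz_idle_enter() ran, and re-testing local_softirq_pending() at exit time could see a softirq raised while idling and wrongly skip an exit whose matching enter had happened. Skipping the enter in the first place avoids stopping the tick only to restart it immediately, since need_resched() is already true and the inner idle loop never runs.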
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc5114004..cdefab6df124 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1813,3 +1813,8 @@ static inline void cpufreq_trigger_update(u64 time) {}
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant() (false)
#endif
+
+static inline bool softirq_pending(int cpu)
+{
+ return false;
+}
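
The kernel/sched/sched.h counterpart is presumably a stub so that the shared kernel/sched/idle.c compiles and behaves identically under the mainline scheduler: there softirq_pending() always returns false, so the unlikely(softirq_pending(cpu)) branch in cpu_idle_loop() folds away and nohz behaviour is unchanged. Both variants take a cpu argument to keep the idle.c call site common, even though neither body currently uses it.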