-rw-r--r--  kernel/sched/MuQSS.c | 36
-rw-r--r--  kernel/sched/MuQSS.h | 15
-rw-r--r--  kernel/sched/idle.c  | 10
-rw-r--r--  kernel/sched/sched.h |  5
4 files changed, 50 insertions, 16 deletions
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index fe9425574076..a8480fcc7870 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -137,7 +137,7 @@
 
 void print_scheduler_version(void)
 {
-	printk(KERN_INFO "MuQSS CPU scheduler v0.120 by Con Kolivas.\n");
+	printk(KERN_INFO "MuQSS CPU scheduler v0.135 by Con Kolivas.\n");
 }
 
 /*
@@ -752,6 +752,13 @@ static inline bool task_queued(struct task_struct *p)
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
 static inline void resched_if_idle(struct rq *rq);
 
+/* Dodgy workaround till we figure out where the softirqs are going */
+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
+{
+	if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
+		do_softirq_own_stack();
+}
+
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
@@ -808,9 +815,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif
 	rq_unlock(rq);
 
-	/* Dodgy workaround till we figure out where the softirqs are going */
-	if (unlikely(current == rq->idle && local_softirq_pending() && !in_interrupt()))
-		do_softirq_own_stack();
+	do_pending_softirq(rq, current);
 
 	local_irq_enable();
 }
@@ -3494,6 +3499,12 @@ static inline struct task_struct
 		 * is locked so entries will always be accurate.
 		 */
 		if (!sched_interactive) {
+			/*
+			 * Don't reschedule balance across nodes unless the CPU
+			 * is idle.
+			 */
+			if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3)
+				break;
 			if (entries <= best_entries)
 				continue;
 		} else if (!entries)
@@ -3518,8 +3529,8 @@ static inline struct task_struct
 			key = other_rq->node.next[0]->key;
 			/* Reevaluate key after locking */
 			if (unlikely(key >= best_key)) {
-				if (i)
-					unlock_rq(other_rq);
+				/* This will always be when rq != other_rq */
+				unlock_rq(other_rq);
 				continue;
 			}
 
@@ -3793,13 +3804,8 @@ static void __sched notrace __schedule(bool preempt)
 			struct task_struct *to_wakeup;
 
 			to_wakeup = wq_worker_sleeping(prev);
-			if (to_wakeup) {
-				/* This shouldn't happen, but does */
-				if (WARN_ONCE((to_wakeup == prev), "Waking up prev as worker\n"))
-					deactivate = false;
-				else
-					try_to_wake_up_local(to_wakeup);
-			}
+			if (to_wakeup)
+				try_to_wake_up_local(to_wakeup);
 		}
 	}
 	switch_count = &prev->nvcsw;
@@ -3851,7 +3857,9 @@ static void __sched notrace __schedule(bool preempt)
 		context_switch(rq, prev, next); /* unlocks the rq */
 	} else {
 		check_siblings(rq);
-		rq_unlock_irq(rq);
+		rq_unlock(rq);
+		do_pending_softirq(rq, next);
+		local_irq_enable();
 	}
 }
 
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index f9510d739777..3565a7d8a5ee 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -1,5 +1,6 @@
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
+#include <linux/interrupt.h>
 #include <linux/skip_list.h>
 #include <linux/stop_machine.h>
 #include "cpuacct.h"
@@ -325,4 +326,18 @@ static inline void cpufreq_trigger(u64 time, unsigned long util)
 #define arch_scale_freq_invariant() (false)
 #endif
 
+/*
+ * This should only be called when current == rq->idle. Dodgy workaround for
+ * when softirqs are pending and we are in the idle loop. Setting current to
+ * resched will kick us out of the idle loop and the softirqs will be serviced
+ * on our next pass through schedule().
+ */
+static inline bool softirq_pending(int cpu)
+{
+	if (likely(!local_softirq_pending()))
+		return false;
+	set_tsk_need_resched(current);
+	return true;
+}
+
 #endif /* MUQSS_SCHED_H */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 060b76d854ec..51264e6b120e 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -208,6 +208,8 @@ static void cpu_idle_loop(void)
 	int cpu = smp_processor_id();
 
 	while (1) {
+		bool pending = false;
+
 		/*
 		 * If the arch has a polling bit, we maintain an invariant:
 		 *
@@ -219,7 +221,10 @@ static void cpu_idle_loop(void)
 		__current_set_polling();
 		quiet_vmstat();
 
-		tick_nohz_idle_enter();
+		if (unlikely(softirq_pending(cpu)))
+			pending = true;
+		else
+			tick_nohz_idle_enter();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -259,7 +264,8 @@ static void cpu_idle_loop(void)
 		 * not have had an IPI to fold the state for us.
 		 */
 		preempt_set_need_resched();
-		tick_nohz_idle_exit();
+		if (!pending)
+			tick_nohz_idle_exit();
 		__current_clr_polling();
 
 		/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc5114004..cdefab6df124 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1813,3 +1813,8 @@ static inline void cpufreq_trigger_update(u64 time) {}
 #else /* arch_scale_freq_capacity */
 #define arch_scale_freq_invariant() (false)
 #endif
+
+static inline bool softirq_pending(int cpu)
+{
+	return false;
+}
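
For readers tracing the control flow of the idle.c hunks above: the new softirq_pending() helper never services softirqs itself; it only sets the resched flag so the idle loop skips nohz entry and falls back into schedule(), where do_pending_softirq() runs them. Below is a minimal standalone sketch of that flow in userspace C. Everything here is a mocked stand-in (local_softirq_pending, set_tsk_need_resched, the nohz calls), not the real kernel API; it only models the decision logic of one pass through cpu_idle_loop().

/* Illustrative sketch only: mocked kernel primitives, single idle pass. */
#include <stdbool.h>
#include <stdio.h>

static bool mock_softirq_raised = true;  /* pretend a softirq is pending */
static bool need_resched_flag;

/* Stand-ins for local_softirq_pending() / set_tsk_need_resched(current) */
static bool local_softirq_pending(void) { return mock_softirq_raised; }
static void set_tsk_need_resched(void)  { need_resched_flag = true; }

/* Mirrors the MuQSS.h helper: flag a resched instead of idling */
static bool softirq_pending(void)
{
	if (!local_softirq_pending())
		return false;
	set_tsk_need_resched();
	return true;
}

static void tick_nohz_idle_enter(void) { puts("nohz enter"); }
static void tick_nohz_idle_exit(void)  { puts("nohz exit"); }

int main(void)
{
	/* One pass of the cpu_idle_loop() logic from the patch */
	bool pending = false;

	if (softirq_pending())
		pending = true;          /* skip nohz: schedule() runs soon */
	else
		tick_nohz_idle_enter();

	while (!need_resched_flag) {
		/* would normally enter a low-power state here; in this
		 * mock an "interrupt" wakes us immediately */
		need_resched_flag = true;
	}

	if (!pending)
		tick_nohz_idle_exit();   /* only exit nohz if we entered it */

	puts("leaving idle; softirqs serviced on next schedule()");
	return 0;
}

Note the asymmetry the pending flag guards against: tick_nohz_idle_exit() must only run when tick_nohz_idle_enter() actually ran, which is why the flag is carried across the idle wait. The sched.h stub that always returns false keeps the mainline (non-MuQSS) build compiling unchanged.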