author		Con Kolivas <kernel@kolivas.org>	2016-10-30 10:57:00 +1100
committer	Con Kolivas <kernel@kolivas.org>	2016-10-31 11:26:57 +1100
commit		6d6ea6a24879ed0f5c99d64d3d621d2007c699a3 (patch)
tree		6e4a25e7302be1782e90393450f33253bb62851c /kernel
parent		911164cc453c7b00e8c9305761d484702b291b22 (diff)
Add a dodgy workaround to do pending softirqs if we detect them upon a runqueue going idle.
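In outline, the first hunk below stops finish_lock_switch() from unconditionally doing raw_spin_unlock_irq(): it drops the runqueue lock first and, while interrupts are still disabled, runs any pending softirqs when the CPU is switching to its idle task, only then re-enabling interrupts. A rough sketch of that shape (not part of the commit itself; rq_unlock() and rq->idle are MuQSS internals, the remaining helpers are the standard ones from <linux/interrupt.h> and friends):

	rq_unlock(rq);			/* drop the rq lock, IRQs still off */

	/* About to run the idle task with softirqs pending: run them now. */
	if (unlikely(current == rq->idle && local_softirq_pending() && !in_interrupt()))
		do_softirq_own_stack();	/* process them on the softirq stack */

	local_irq_enable();

The remaining hunks are fallout from the same change: finish_task_switch() and context_switch() no longer need to return the runqueue pointer, so their return type becomes void and the callers in schedule_tail() and __schedule() are adjusted to match.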
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/MuQSS.c	22
1 file changed, 12 insertions, 10 deletions
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index f66a3daffbf8..fe9425574076 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -806,7 +806,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 		raw_spin_unlock(&prev->pi_lock);
 	}
 #endif
-	raw_spin_unlock_irq(&rq->lock);
+	rq_unlock(rq);
+
+	/* Dodgy workaround till we figure out where the softirqs are going */
+	if (unlikely(current == rq->idle && local_softirq_pending() && !in_interrupt()))
+		do_softirq_own_stack();
+
+	local_irq_enable();
 }
 
 static inline bool deadline_before(u64 deadline, u64 time)
@@ -2548,7 +2554,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * past. prev == current is still correct but we need to recalculate this_rq
  * because prev may have moved to another CPU.
  */
-static struct rq *finish_task_switch(struct task_struct *prev)
+static void finish_task_switch(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2601,7 +2607,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		kprobe_flush_task(prev);
 		put_task_struct(prev);
 	}
-	return rq;
 }
 
 /**
@@ -2609,10 +2614,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
  * @prev: the thread we just switched away from.
  */
 asmlinkage __visible void schedule_tail(struct task_struct *prev)
-	__releases(rq->lock)
 {
-	struct rq *rq;
-
 	/*
 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
 	 * finish_task_switch() for details.
@@ -2622,7 +2624,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	 * PREEMPT_COUNT kernels).
 	 */
 
-	rq = finish_task_switch(prev);
+	finish_task_switch(prev);
 	preempt_enable();
 
 	if (current->set_child_tid)
@@ -2632,7 +2634,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 /*
  * context_switch - switch to the new MM and the new thread's register state.
  */
-static __always_inline struct rq *
+static __always_inline void
 context_switch(struct rq *rq, struct task_struct *prev,
 	       struct task_struct *next)
 {
@@ -2672,7 +2674,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	switch_to(prev, next, prev);
 	barrier();
 
-	return finish_task_switch(prev);
+	finish_task_switch(prev);
 }
 
 /*
@@ -3846,7 +3848,7 @@ static void __sched notrace __schedule(bool preempt)
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		context_switch(rq, prev, next); /* unlocks the rq */
 	} else {
 		check_siblings(rq);
 		rq_unlock_irq(rq);