summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2011-04-05 17:23:50 +0200
committerIngo Molnar <mingo@elte.hu>2011-04-11 16:50:56 +0200
commita3380736e4b3a0c6249ed18ee20ddb19bf95cad5 (patch)
tree561b8318f3b5e1e4596640dc4f7035e1b35348e4 /kernel
parent498d27285f27c1ff368f525eef19aca146cb412e (diff)
sched: Also serialize ttwu_local() with p->pi_lock
Since we now serialize ttwu() using p->pi_lock, we also need to serialize ttwu_local() using that, otherwise, once we drop the rq->lock from ttwu() it can race with ttwu_local(). Reviewed-by: Frank Rowand <frank.rowand@am.sony.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Mike Galbraith <efault@gmx.de> Cc: Nick Piggin <npiggin@kernel.dk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Link: http://lkml.kernel.org/r/20110405152729.192366907@chello.nl Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/sched.c 28
1 files changed, 17 insertions, 11 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ee30f658e10..2e288eafe461 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2559,9 +2559,9 @@ out:
* try_to_wake_up_local - try to wake up a local task with rq lock held
* @p: the thread to be awakened
*
- * Put @p on the run-queue if it's not already there. The caller must
+ * Put @p on the run-queue if it's not already there. The caller must
* ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task. this_rq() stays locked over invocation.
+ * the current task.
*/
static void try_to_wake_up_local(struct task_struct *p)
{
@@ -2569,16 +2569,21 @@ static void try_to_wake_up_local(struct task_struct *p)
BUG_ON(rq != this_rq());
BUG_ON(p == current);
- lockdep_assert_held(&rq->lock);
+
+ raw_spin_unlock(&rq->lock);
+ raw_spin_lock(&p->pi_lock);
+ raw_spin_lock(&rq->lock);
if (!(p->state & TASK_NORMAL))
- return;
+ goto out;
if (!p->on_rq)
activate_task(rq, p, ENQUEUE_WAKEUP);
ttwu_post_activation(p, rq, 0);
ttwu_stat(rq, p, smp_processor_id(), 0);
+out:
+ raw_spin_unlock(&p->pi_lock);
}
/**
@@ -4083,6 +4088,7 @@ pick_next_task(struct rq *rq)
*/
asmlinkage void __sched schedule(void)
{
+ struct task_struct *to_wakeup = NULL;
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
@@ -4113,13 +4119,8 @@ need_resched:
* task to maintain concurrency. If so, wake
* up the task.
*/
- if (prev->flags & PF_WQ_WORKER) {
- struct task_struct *to_wakeup;
-
+ if (prev->flags & PF_WQ_WORKER)
to_wakeup = wq_worker_sleeping(prev, cpu);
- if (to_wakeup)
- try_to_wake_up_local(to_wakeup);
- }
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
}
@@ -4136,8 +4137,13 @@ need_resched:
raw_spin_lock(&rq->lock);
}
+ /*
+ * All three: try_to_wake_up_local(), pre_schedule() and idle_balance()
+ * can drop rq->lock.
+ */
+ if (to_wakeup)
+ try_to_wake_up_local(to_wakeup);
pre_schedule(rq, prev);
-
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);