author    Con Kolivas <kernel@kolivas.org>    2016-10-22 09:33:49 +1100
committer Con Kolivas <kernel@kolivas.org>    2016-10-22 09:55:05 +1100
commit    7f9672079f5db72ba078f0e7cd1772b795ac0312 (patch)
tree      4b74c4ea3a8ff6611bf447dd8aaa78f76b97b002 /kernel
parent    b2a4f8a05b6fdcec2742661ec24829a73bb0bd1c (diff)
Resched when needed after calling do_set_cpus_allowed from outside the
scheduler code, and consolidate smp_send_reschedule so that it is only
called if the remote CPU is online.
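
The core of the change is a new helper that funnels every reschedule IPI
through an online check on the target CPU. A minimal sketch of the pattern,
using the names from the diff below:

	/*
	 * Only send the reschedule IPI if the remote CPU is online;
	 * an IPI to an offline CPU is pointless at best.
	 */
	static inline void smp_sched_reschedule(int cpu)
	{
		if (likely(cpu_online(cpu)))
			smp_send_reschedule(cpu);
	}

Every direct smp_send_reschedule() call site in MuQSS.c is then switched to
this wrapper, which also lets the scattered per-caller rq->online checks in
the SMT sibling functions be dropped, as the hunks below show.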
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/MuQSS.c	| 65
1 file changed, 42 insertions, 23 deletions
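
The other half of the change splits do_set_cpus_allowed into three variants:
a shared __do_set_cpus_allowed, the exported do_set_cpus_allowed for callers
outside the scheduler (which now reschedules the task itself), and an
internal _do_set_cpus_allowed whose caller, __set_cpus_allowed_ptr, handles
the reschedule. Condensed from the hunks below, the external variant looks
like:

	void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
	{
		__do_set_cpus_allowed(p, new_mask);
		/* The new mask may exclude the task's current CPU... */
		if (needs_other_cpu(p, task_cpu(p))) {
			set_task_cpu(p, valid_task_cpu(p));	/* ...so move it... */
			resched_task(p);			/* ...and force a resched. */
		}
	}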
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 7fb20528a6be..a921794a6088 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -676,6 +676,12 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
next->on_cpu = 1;
}
+static inline void smp_sched_reschedule(int cpu)
+{
+ if (likely(cpu_online(cpu)))
+ smp_send_reschedule(cpu);
+}
+
/*
* resched_task - mark a task 'to be rescheduled now'.
*
@@ -702,7 +708,7 @@ void resched_task(struct task_struct *p)
}
if (set_nr_and_not_polling(p))
- smp_send_reschedule(cpu);
+ smp_sched_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -1113,7 +1119,7 @@ static void resched_curr(struct rq *rq)
}
if (set_nr_and_not_polling(rq->curr))
- smp_send_reschedule(cpu);
+ smp_sched_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -1208,7 +1214,7 @@ static inline void resched_idle(struct rq *rq)
return;
}
- smp_send_reschedule(rq->cpu);
+ smp_sched_reschedule(rq->cpu);
}
static struct rq *resched_best_idle(struct task_struct *p, int cpu)
@@ -1588,7 +1594,7 @@ void kick_process(struct task_struct *p)
preempt_disable();
cpu = task_cpu(p);
if ((cpu != smp_processor_id()) && task_curr(p))
- smp_send_reschedule(cpu);
+ smp_sched_reschedule(cpu);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
@@ -1844,7 +1850,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
if (!set_nr_if_polling(rq->idle))
- smp_send_reschedule(cpu);
+ smp_sched_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -1865,7 +1871,7 @@ void wake_up_if_idle(int cpu)
} else {
rq_lock_irqsave(rq, &flags);
if (likely(is_idle_task(rq->curr)))
- smp_send_reschedule(cpu);
+ smp_sched_reschedule(cpu);
/* Else cpu is not in idle, do nothing here */
rq_unlock_irqrestore(rq, &flags);
}
@@ -3812,13 +3818,9 @@ static void check_smt_siblings(struct rq *this_rq)
rq = cpu_rq(other_cpu);
if (rq_idle(rq))
continue;
- if (unlikely(!rq->online))
- continue;
p = rq->curr;
- if (!smt_schedule(p, this_rq)) {
- set_tsk_need_resched(p);
- smp_send_reschedule(other_cpu);
- }
+ if (!smt_schedule(p, this_rq))
+ resched_curr(rq);
}
}
@@ -3830,14 +3832,8 @@ static void wake_smt_siblings(struct rq *this_rq)
struct rq *rq;
rq = cpu_rq(other_cpu);
- if (unlikely(!rq->online))
- continue;
- if (rq_idle(rq)) {
- struct task_struct *p = rq->curr;
-
- set_tsk_need_resched(p);
- smp_send_reschedule(other_cpu);
- }
+ if (rq_idle(rq))
+ resched_idle(rq);
}
}
#else
@@ -5578,7 +5574,7 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
@@ -5593,6 +5589,29 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
*/
lockdep_assert_held(&rq->lock);
}
+}
+
+/*
+ * Calling do_set_cpus_allowed from outside the scheduler code may leave the
+ * task unable to run on its current CPU, so reschedule it here.
+ */
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ __do_set_cpus_allowed(p, new_mask);
+ if (needs_other_cpu(p, task_cpu(p))) {
+ set_task_cpu(p, valid_task_cpu(p));
+ resched_task(p);
+ }
+}
+
+/*
+ * For internal scheduler calls to do_set_cpus_allowed; the callers will
+ * reschedule themselves if needed.
+ */
+static void _do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ __do_set_cpus_allowed(p, new_mask);
+ /* __set_cpus_allowed_ptr will handle the reschedule in this variant */
if (needs_other_cpu(p, task_cpu(p)))
set_task_cpu(p, valid_task_cpu(p));
}
@@ -5790,7 +5809,7 @@ void wake_up_idle_cpu(int cpu)
return;
if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
- smp_send_reschedule(cpu);
+ smp_sched_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -5850,7 +5869,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
queued = task_queued(p);
- do_set_cpus_allowed(p, new_mask);
+ _do_set_cpus_allowed(p, new_mask);
if (p->flags & PF_KTHREAD) {
/*