Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 83
1 file changed, 52 insertions(+), 31 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e8552..908c04f9dad0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
- for_each_cpu_mask(i, rd->span) {
+ for_each_cpu_mask_nr(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
@@ -253,7 +253,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
diff = iter->rt_runtime - iter->rt_time;
if (diff > 0) {
- do_div(diff, weight);
+ diff = div_u64((u64)diff, weight);
if (rt_rq->rt_runtime + diff > rt_period)
diff = rt_period - rt_rq->rt_runtime;
iter->rt_runtime -= diff;
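
Side note on the do_div() -> div_u64() change above: do_div() is a macro that divides its 64-bit first argument in place and evaluates to the remainder, while div_u64() simply returns the quotient, which is why the new line has to assign the result back to diff. A minimal sketch of the difference (illustrative only, assuming <asm/div64.h> and <linux/math64.h>; the values are made up):

	u64 a = 1000, b = 1000;
	u32 rem;

	rem = do_div(a, 7);	/* a is rewritten to the quotient 142, rem is 6 */
	b = div_u64(b, 7);	/* b becomes 142; the remainder is not reported */
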
@@ -505,7 +505,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+#ifdef CONFIG_SMP
struct rq *rq = rq_of_rt_rq(rt_rq);
+#endif
rt_rq->highest_prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
@@ -599,11 +601,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
return;
- if (rt_se->nr_cpus_allowed == 1)
- list_add(&rt_se->run_list, queue);
- else
- list_add_tail(&rt_se->run_list, queue);
-
+ list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +686,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
* Put task to the end of the run list without the overhead of dequeue
* followed by enqueue.
*/
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
- struct rt_prio_array *array = &rt_rq->active;
-
if (on_rt_rq(rt_se)) {
- list_del_init(&rt_se->run_list);
- list_add_tail(&rt_se->run_list,
- array->queue + rt_se_prio(rt_se));
+ struct rt_prio_array *array = &rt_rq->active;
+ struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+ if (head)
+ list_move(&rt_se->run_list, queue);
+ else
+ list_move_tail(&rt_se->run_list, queue);
}
}
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq;
for_each_sched_rt_entity(rt_se) {
rt_rq = rt_rq_of_se(rt_se);
- requeue_rt_entity(rt_rq, rt_se);
+ requeue_rt_entity(rt_rq, rt_se, head);
}
}
static void yield_task_rt(struct rq *rq)
{
- requeue_task_rt(rq, rq->curr);
+ requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
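
A note on the requeue_rt_entity() rework above: list_move() and list_move_tail() from <linux/list.h> fold the old list_del_init() + list_add_tail() pair into a single step, and the new 'head' argument picks between queueing at the front or the back of the priority list. Rough sketch of the list semantics (illustrative only, names made up):

	LIST_HEAD(queue);
	struct list_head a, b;

	list_add_tail(&a, &queue);
	list_add_tail(&b, &queue);	/* queue order: a, b */

	list_move(&b, &queue);		/* unlink b, reinsert at the head: b, a */
	list_move_tail(&b, &queue);	/* unlink b, reinsert at the tail: a, b */
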
@@ -753,6 +753,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
*/
return task_cpu(p);
}
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+ cpumask_t mask;
+
+ if (rq->curr->rt.nr_cpus_allowed == 1)
+ return;
+
+ if (p->rt.nr_cpus_allowed != 1
+ && cpupri_find(&rq->rd->cpupri, p, &mask))
+ return;
+
+ if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+ return;
+
+ /*
+ * There appear to be other cpus that can accept
+ * current and none to run 'p', so let's reschedule
+ * to try and push current away:
+ */
+ requeue_task_rt(rq, p, 1);
+ resched_task(rq->curr);
+}
+
#endif /* CONFIG_SMP */
/*
@@ -778,18 +802,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
* to move current somewhere else, making room for our non-migratable
* task.
*/
- if((p->prio == rq->curr->prio)
- && p->rt.nr_cpus_allowed == 1
- && rq->curr->rt.nr_cpus_allowed != 1) {
- cpumask_t mask;
-
- if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
- /*
- * There appears to be other cpus that can accept
- * current, so lets reschedule to try and push it away
- */
- resched_task(rq->curr);
- }
+ if (p->prio == rq->curr->prio && !need_resched())
+ check_preempt_equal_prio(rq, p);
#endif
}
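
For the two hunks above: on an equal-priority wakeup the old code rescheduled current only when the woken task was pinned and current was not. check_preempt_equal_prio() additionally consults cpupri_find() to confirm that the woken task really has nowhere else to run while current does, and it requeues the woken task at the head of its priority list so it is picked up as soon as current is pushed away. A hedged sketch of the cpupri_find() contract as it is used here (see kernel/sched_cpupri.c; the exact signature is inferred from this patch's call sites):

	cpumask_t mask;
	int can_move;

	/* Returns nonzero and fills 'mask' with CPUs whose highest-priority
	 * runnable work is lower priority than 'p', i.e. CPUs that could
	 * run 'p' immediately. */
	can_move = cpupri_find(&rq->rd->cpupri, p, &mask);
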
@@ -922,6 +936,13 @@ static int find_lowest_rq(struct task_struct *task)
return -1; /* No targets found */
/*
+ * Only consider CPUs that are usable for migration.
+ * I guess we might want to change cpupri_find() to ignore those
+ * in the first place.
+ */
+ cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+
+ /*
* At this point we have built a mask of cpus representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
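
On the cpu_active_map filtering above: cpu_active_map holds the CPUs that are currently usable as migration targets, so intersecting the candidate mask with it keeps find_lowest_rq() from electing a CPU that is on its way offline. A rough sketch with the old cpumask API (illustrative only, not patch code):

	cpumask_t candidates = *lowest_mask;	/* CPUs running lower-priority work */

	cpus_and(candidates, candidates, cpu_active_map);
	if (cpus_empty(candidates))
		return -1;			/* nothing usable left */
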
@@ -1107,7 +1128,7 @@ static int pull_rt_task(struct rq *this_rq)
next = pick_next_task_rt(this_rq);
- for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+ for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
@@ -1415,7 +1436,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
* on the queue:
*/
if (p->rt.run_list.prev != p->rt.run_list.next) {
- requeue_task_rt(rq, p);
+ requeue_task_rt(rq, p, 0);
set_tsk_need_resched(p);
}
}
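
Finally, on the p->rt.run_list.prev != p->rt.run_list.next test in the last hunk: for a node on a circular struct list_head list, prev and next are equal only when the list holds just the queue head and that node, so the test is a cheap "is p the only task at its priority?" check before paying for a tail requeue on every RR tick. Small illustration (not patch code, names made up):

	LIST_HEAD(prio_queue);
	struct list_head task_a, task_b;

	list_add_tail(&task_a, &prio_queue);
	/* alone on the list: task_a.prev == task_a.next == &prio_queue */

	list_add_tail(&task_b, &prio_queue);
	/* now task_a.prev == &prio_queue and task_a.next == &task_b, so the
	 * prev != next test fires and the task gets round-robined */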