Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/deadline.c    8
-rw-r--r--   kernel/sched/rt.c          2
-rw-r--r--   kernel/sched/topology.c    2
-rw-r--r--   kernel/stop_machine.c     10
4 files changed, 19 insertions, 3 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10c7b51c0d1f..b5fbdde6afa9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2090,8 +2090,14 @@ retry:
sub_rq_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
add_rq_bw(&next_task->dl, &later_rq->dl);
+
+ /*
+ * Update the later_rq clock here, because the clock is used
+ * by the cpufreq_update_util() inside __add_running_bw().
+ */
+ update_rq_clock(later_rq);
add_running_bw(&next_task->dl, &later_rq->dl);
- activate_task(later_rq, next_task, 0);
+ activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
ret = 1;
resched_curr(later_rq);
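
The hunk above updates the later_rq clock once, right before add_running_bw() (whose cpufreq_update_util() hook reads that clock), and then passes ENQUEUE_NOCLOCK so activate_task() does not update it a second time. Below is a minimal userspace sketch of that "update the clock once, then pass a no-clock flag" pattern; the toy_rq type and toy_* helpers are invented for illustration and are not the kernel's actual API.

/* build: cc -o toy_noclock toy_noclock.c */
#include <stdio.h>
#include <time.h>

#define ENQUEUE_NOCLOCK 0x1          /* stands in for the kernel flag */

struct toy_rq {
	long long clock_ns;          /* stands in for rq->clock */
	int nr_running;
};

static void toy_update_rq_clock(struct toy_rq *rq)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	rq->clock_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* A consumer such as a frequency-scaling hook would read the clock here. */
static void toy_add_running_bw(struct toy_rq *rq)
{
	printf("add_running_bw sees clock=%lld\n", rq->clock_ns);
}

static void toy_activate_task(struct toy_rq *rq, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		toy_update_rq_clock(rq);   /* skipped: caller already did it */
	rq->nr_running++;
}

int main(void)
{
	struct toy_rq later_rq = { 0, 0 };

	/* Update the clock once, before the first consumer reads it ... */
	toy_update_rq_clock(&later_rq);
	toy_add_running_bw(&later_rq);
	/* ... and tell the enqueue path not to update it again. */
	toy_activate_task(&later_rq, ENQUEUE_NOCLOCK);

	printf("nr_running=%d\n", later_rq.nr_running);
	return 0;
}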
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 572567078b60..eaaec8364f96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
* can be time-consuming. Try to avoid it when possible.
*/
raw_spin_lock(&rt_rq->rt_runtime_lock);
+ if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+ rt_rq->rt_runtime = rt_b->rt_runtime;
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (skip)
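
The two added lines reset the per-CPU rt_runtime back to the global per-period budget whenever the RT_RUNTIME_SHARE feature is disabled (and the runqueue is not marked as having unlimited runtime), so values skewed by earlier runtime borrowing do not persist. A toy model of that refill step, assuming invented names (toy_rt_rq, do_period_timer) rather than the kernel's structures:

#include <stdio.h>

#define RUNTIME_INF (-1LL)           /* stand-in for the "unlimited" sentinel */

struct toy_rt_rq {
	long long rt_runtime;        /* per-period budget, in ns */
	long long rt_time;           /* runtime consumed this period */
};

static int rt_runtime_share;         /* 0: the sharing feature is disabled */

static void do_period_timer(struct toy_rt_rq *rt_rq, long long global_runtime)
{
	/* Without sharing, restore the full per-period budget ... */
	if (!rt_runtime_share && rt_rq->rt_runtime != RUNTIME_INF)
		rt_rq->rt_runtime = global_runtime;
	/* ... the rest of the per-period accounting is elided in this sketch. */
}

int main(void)
{
	/* Budget shrunk to 300 us after lending runtime to another CPU. */
	struct toy_rt_rq rq = { .rt_runtime = 300000, .rt_time = 0 };

	do_period_timer(&rq, 950000);    /* e.g. 950 us budget per 1 ms period */
	printf("rt_runtime after refill: %lld ns\n", rq.rt_runtime);
	return 0;
}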
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05a831427bc7..56a0fed30c0a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
}
- if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+ if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
}
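
The added check above only dereferences the group pointer (via sched_group_span()) when it is non-NULL, so the debug printout cannot crash if a domain's groups are not set up. A tiny standalone illustration of the same guard, with made-up types (toy_group, debug_one):

#include <stdio.h>

struct toy_group {
	unsigned long span;          /* bitmask of CPUs, invented for the demo */
};

static void debug_one(int cpu, struct toy_group *group)
{
	/* Check the pointer before dereferencing it, as in the fix above. */
	if (group && !(group->span & (1UL << cpu)))
		fprintf(stderr, "ERROR: domain->groups does not contain CPU%d\n", cpu);
}

int main(void)
{
	struct toy_group g = { .span = 0x2 };   /* contains CPU1 only */

	debug_one(0, &g);    /* reports that CPU0 is missing from the span */
	debug_one(0, NULL);  /* no dereference, no crash */
	return 0;
}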
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1ff523dae6e2..e190d1ef3a23 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -260,6 +260,15 @@ retry:
err = 0;
__cpu_stop_queue_work(stopper1, work1, &wakeq);
__cpu_stop_queue_work(stopper2, work2, &wakeq);
+ /*
+ * The waking up of stopper threads has to happen
+ * in the same scheduling context as the queueing.
+ * Otherwise, there is a possibility of one of the
+ * above stoppers being woken up by another CPU,
+ * and preempting us. This will cause us to not
+ * wake up the other stopper forever.
+ */
+ preempt_disable();
unlock:
raw_spin_unlock(&stopper2->lock);
raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
}
if (!err) {
- preempt_disable();
wake_up_q(&wakeq);
preempt_enable();
}
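
Taken together, the two stop_machine.c hunks move preempt_disable() from just before wake_up_q() to just after both works are queued, so preemption is already off when the stopper locks are dropped; both stopper threads are then woken, and only afterwards is preemption re-enabled. A userspace sketch of that ordering, with preempt_disable()/preempt_enable() reduced to commented stubs and the toy_stopper type invented for illustration:

#include <stdio.h>

struct toy_stopper {
	const char *name;
	int queued;
	int woken;
};

/* In the kernel these forbid/allow involuntary context switches; here they
 * only mark the region in which no blocking or rescheduling may happen. */
static void preempt_disable(void) { }
static void preempt_enable(void) { }

static void queue_work(struct toy_stopper *s)
{
	s->queued = 1;               /* done while holding the stopper lock */
}

static void wake_both(struct toy_stopper *a, struct toy_stopper *b)
{
	a->woken = 1;                /* stands in for wake_up_q() */
	b->woken = 1;
}

int main(void)
{
	struct toy_stopper s1 = { "stopper1", 0, 0 };
	struct toy_stopper s2 = { "stopper2", 0, 0 };

	queue_work(&s1);
	queue_work(&s2);
	/* Disable preemption before the locks are dropped, so a freshly woken
	 * stopper cannot preempt us before the second wake-up happens. */
	preempt_disable();
	/* ... stopper locks released here in the real code ... */
	wake_both(&s1, &s2);
	preempt_enable();

	printf("%s woken=%d, %s woken=%d\n", s1.name, s1.woken, s2.name, s2.woken);
	return 0;
}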