author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-03-07 10:08:17 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-03-07 10:08:17 -0800
commit    1d6789c3bc2b70bed1eb71aa616b1d94f9c23a63 (patch)
tree      eb5bcbae964e99242d54c060085a87fc27c2ddf8 /kernel/sched.c
parent    0a9e0703497013cf7a21455e51face5f048a187f (diff)
parent    521f1a2489c41f8b1181b0a8eb52e1c34284d50b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: don't allow rt_runtime_us to be zero for groups having rt tasks
  sched: rt-group: fixup schedulability constraints calculation
  sched: fix the wrong time slice value for SCHED_FIFO tasks
  sched: export task_nice
  sched: balance RT task resched only on runqueue
  sched: retain vruntime
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd553cc4ee8..52b98675acb2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4422,7 +4422,7 @@ int task_nice(const struct task_struct *p)
 {
 	return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
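
With task_nice() now exported via EXPORT_SYMBOL rather than EXPORT_SYMBOL_GPL ("sched: export task_nice"), non-GPL modules can link against it again. A minimal sketch of a caller; the module name and message are illustrative, not from this commit:

	/* hypothetical module that reads the loading task's nice value */
	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/sched.h>

	static int __init nice_demo_init(void)
	{
		/* loadable without a GPL-compatible license tag now that the
		 * export is EXPORT_SYMBOL rather than EXPORT_SYMBOL_GPL */
		printk(KERN_INFO "nice_demo: current nice = %d\n", task_nice(current));
		return 0;
	}

	static void __exit nice_demo_exit(void)
	{
	}

	module_init(nice_demo_init);
	module_exit(nice_demo_exit);
	MODULE_LICENSE("Proprietary");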
@@ -5100,7 +5100,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	time_slice = 0;
 	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	} else {
+	} else if (p->policy != SCHED_FIFO) {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
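
The SCHED_FIFO fix keeps sys_sched_rr_get_interval() from reporting a CFS-derived slice for FIFO tasks, which run until they block or yield and have no timeslice at all; they now fall through with time_slice = 0. A userspace sketch to observe the change (assumes a kernel with this fix and root privileges):

	/* a SCHED_FIFO task should now report a zero interval */
	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 1 };
		struct timespec ts;

		if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
			perror("sched_setscheduler (needs root)");
			return 1;
		}
		if (sched_rr_get_interval(0, &ts)) {
			perror("sched_rr_get_interval");
			return 1;
		}
		/* prints 0.000000000 with the fix; a bogus nonzero slice before it */
		printf("interval = %ld.%09lds\n", (long)ts.tv_sec, (long)ts.tv_nsec);
		return 0;
	}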
@@ -7625,6 +7625,11 @@ void sched_move_task(struct task_struct *tsk)
 
 	set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (tsk->sched_class->moved_group)
+		tsk->sched_class->moved_group(tsk);
+#endif
+
 	if (on_rq) {
 		if (unlikely(running))
 			tsk->sched_class->set_curr_task(rq);
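
The new moved_group hook gives a scheduling class a callback when a task changes task groups; "sched: retain vruntime" uses it so a fair-class task is re-placed against its new group's runqueue rather than carrying an absolute vruntime across. The fair-class side lives in kernel/sched_fair.c and is not part of this file's diff; roughly, it is a sketch along these lines:

	/* sketch of the fair-class hook (kernel/sched_fair.c, not in this diff) */
	#ifdef CONFIG_FAIR_GROUP_SCHED
	static void moved_group_fair(struct task_struct *p)
	{
		struct cfs_rq *cfs_rq = task_cfs_rq(p);

		/* re-place the entity relative to the new cfs_rq's min_vruntime */
		place_entity(cfs_rq, &p->se, 1);
	}
	#endif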
@@ -7721,9 +7726,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	runtime *= (1ULL << 16);
-	div64_64(runtime, period);
-	return runtime;
+	return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
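
Two problems are fixed in to_ratio() at once: div64_64() returns its quotient rather than dividing in place, so the old code threw the division away and effectively returned runtime << 16; the rewrite computes the intended 16.16 fixed-point ratio runtime/period, so runtime == period yields 1 << 16 = 65536. A self-contained userspace sketch of the same arithmetic:

	/* 16.16 fixed-point ratio as to_ratio() now computes it */
	#include <stdio.h>

	typedef unsigned long long u64;

	static unsigned long to_ratio(u64 period, u64 runtime)
	{
		return (unsigned long)((runtime << 16) / period);
	}

	int main(void)
	{
		/* 950 ms of runtime per 1 s period: 0.95 * 65536 = 62259 */
		printf("%lu\n", to_ratio(1000000000ULL, 950000000ULL));
		return 0;
	}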
@@ -7747,25 +7750,40 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 	return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+	struct task_struct *g, *p;
+	do_each_thread(g, p) {
+		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+			return 1;
+	} while_each_thread(g, p);
+	return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
 	u64 rt_runtime, rt_period;
 	int err = 0;
 
-	rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+	rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
 	if (rt_runtime_us == -1)
-		rt_runtime = rt_period;
+		rt_runtime = RUNTIME_INF;
 
 	mutex_lock(&rt_constraints_mutex);
+	read_lock(&tasklist_lock);
+	if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+		err = -EBUSY;
+		goto unlock;
+	}
 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 		err = -EINVAL;
 		goto unlock;
 	}
-	if (rt_runtime_us == -1)
-		rt_runtime = RUNTIME_INF;
 	tg->rt_runtime = rt_runtime;
  unlock:
+	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 	return err;
 }
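
Taken together, the EBUSY path means writing 0 to a group's rt_runtime while it still contains realtime tasks now fails instead of silently starving them, and holding tasklist_lock for reading keeps tasks from entering or leaving the group during the check. A userspace sketch of the new failure mode; the cgroup mount point and group name are assumptions, adjust to wherever the cpu controller is mounted:

	/* expect EBUSY when zeroing rt_runtime_us on a group with rt tasks */
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path: cpu cgroup mounted at /dev/cgroup, group "rtgrp" */
		int fd = open("/dev/cgroup/rtgrp/cpu.rt_runtime_us", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0", 1) < 0 && errno == EBUSY)
			printf("EBUSY: group still has realtime tasks\n");
		close(fd);
		return 0;
	}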