Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  40
1 file changed, 18 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 79e75fd4b78f..4a378bc47d91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,8 @@
#include <asm/tlb.h>
#include <asm/irq_regs.h>

+#include "sched_cpupri.h"
+
/*
* Scheduler clock - returns current time in nanosec units.
* This is default implementation.
@@ -462,6 +464,7 @@ struct rt_rq {
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
int overloaded;
+ int pushed;
#endif
int rt_throttled;
u64 rt_time;
@@ -500,6 +503,9 @@ struct root_domain {
*/
cpumask_t rto_mask;
atomic_t rto_count;
+#ifdef CONFIG_SMP
+ struct cpupri cpupri;
+#endif
};

/*
@@ -918,7 +924,7 @@ static inline u64 global_rt_runtime(void)
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

-static const unsigned long long time_sync_thresh = 100000;
+unsigned long long time_sync_thresh = 100000;

static DEFINE_PER_CPU(unsigned long long, time_offset);
static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
@@ -995,6 +1001,8 @@ unsigned long long notrace cpu_clock(int cpu)
if (unlikely(delta_time > time_sync_thresh))
time = __sync_cpu_clock(time, cpu);

+ per_cpu(prev_cpu_time, cpu) = time;
+
return time;
}
EXPORT_SYMBOL_GPL(cpu_clock);
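The two changes above make time_sync_thresh tunable (it is no longer static const) and record the value handed back to the caller in prev_cpu_time on each cpu_clock() call. For context, here is a userspace sketch of the surrounding pattern: a per-CPU clock reading is compared with the last value returned for that CPU, and resynchronized when the jump exceeds time_sync_thresh. Everything here (raw_clock, the resync step, the array sizes) is a stand-in for the kernel's per-CPU machinery, not the actual implementation of sched_clock()/__sync_cpu_clock():

/* Userspace sketch only; names and the sync step are assumptions. */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long long time_sync_thresh = 100000;	/* ns */
static unsigned long long prev_cpu_time[NR_CPUS];
static unsigned long long time_offset[NR_CPUS];

/* Stand-in for the raw, possibly skewed, per-CPU clock. */
static unsigned long long raw_clock(int cpu, unsigned long long now)
{
	return now + cpu * 50000ULL;	/* give each fake CPU a fixed skew */
}

static unsigned long long cpu_clock_sketch(int cpu, unsigned long long now)
{
	unsigned long long prev = prev_cpu_time[cpu];
	unsigned long long time = raw_clock(cpu, now) + time_offset[cpu];
	unsigned long long delta = time - prev;

	if (delta > time_sync_thresh) {
		/* crude stand-in for __sync_cpu_clock(): clamp to the
		 * last value and fold the jump into this CPU's offset */
		time_offset[cpu] -= delta;
		time = prev;
	}

	prev_cpu_time[cpu] = time;	/* the line this hunk adds */
	return time;
}

int main(void)
{
	for (unsigned long long now = 0; now <= 240000; now += 60000)
		printf("cpu3 clock: %llu\n", cpu_clock_sketch(3, now));
	return 0;
}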
@@ -5605,6 +5613,9 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);

+cpumask_t cpu_system_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_system_map);
+
#ifndef CONFIG_SMP
cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_online_map);
@@ -6919,6 +6930,8 @@ static void init_rootdomain(struct root_domain *rd)
cpus_clear(rd->span);
cpus_clear(rd->online);
+
+ cpupri_init(&rd->cpupri);
}

static void init_defrootdomain(void)
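The new struct cpupri member gives each root_domain its own CPU-priority map, set up by the cpupri_init() call added above. sched_cpupri.h itself is not part of this diff, so the following is only a hypothetical, simplified sketch of such a structure: a mask of CPUs per priority level plus each CPU's current level, letting a caller scan levels from the bottom up instead of probing every CPU. Sizes and names below are assumptions:

/* Hypothetical sketch only: the real layout lives in sched_cpupri.h. */
#include <string.h>

#define SK_NR_CPUS	64
#define SK_NR_PRI	102	/* assumed: ~100 RT levels plus normal/idle */

struct cpupri_sketch {
	unsigned long long pri_to_cpu[SK_NR_PRI];	/* CPUs sitting at each level */
	int cpu_to_pri[SK_NR_CPUS];			/* each CPU's current level */
};

/* Analogue of the cpupri_init(&rd->cpupri) call in init_rootdomain(). */
static void cpupri_init_sketch(struct cpupri_sketch *cp)
{
	memset(cp->pri_to_cpu, 0, sizeof(cp->pri_to_cpu));
	for (int i = 0; i < SK_NR_CPUS; i++)
		cp->cpu_to_pri[i] = -1;	/* unknown until the CPU reports in */
}

/* Find any CPU running below priority level 'pri'; -1 if none. */
static int cpupri_find_sketch(struct cpupri_sketch *cp, int pri)
{
	for (int level = 0; level < pri; level++)
		if (cp->pri_to_cpu[level])
			return __builtin_ctzll(cp->pri_to_cpu[level]);
	return -1;
}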
@@ -6974,24 +6987,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
rcu_assign_pointer(rq->sd, sd);
}

-/* cpus with isolated domains */
-static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
-
-/* Setup the mask of cpus configured for isolated domains */
-static int __init isolated_cpu_setup(char *str)
-{
- int ints[NR_CPUS], i;
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
- cpus_clear(cpu_isolated_map);
- for (i = 1; i <= ints[0]; i++)
- if (ints[i] < NR_CPUS)
- cpu_set(ints[i], cpu_isolated_map);
- return 1;
-}
-
-__setup("isolcpus=", isolated_cpu_setup);
-
/*
* init_sched_build_groups takes the cpumask we wish to span, and a pointer
* to a function which identifies what group(along with sched group) a CPU
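For reference, the block removed above parsed the isolcpus= boot option into cpu_isolated_map using get_options(). A standalone C sketch of that parsing, with strtoul() standing in for the kernel helpers, behaves like this:

/* Userspace sketch of what the removed isolated_cpu_setup() did:
 * turn "1,3" into a bitmask of isolated CPUs. The kernel used
 * get_options() and cpu_set(); strtoul() stands in here. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 64

static unsigned long long parse_isolcpus(const char *str)
{
	unsigned long long mask = 0;
	char *end;

	while (*str) {
		unsigned long cpu = strtoul(str, &end, 10);
		if (end == str)
			break;			/* no digits: stop parsing */
		if (cpu < NR_CPUS)
			mask |= 1ULL << cpu;	/* cpu_set(cpu, cpu_isolated_map) */
		if (*end != ',')
			break;
		str = end + 1;
	}
	return mask;
}

int main(void)
{
	printf("isolcpus=1,3 -> mask 0x%llx\n", parse_isolcpus("1,3"));	/* 0xa */
	return 0;
}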
@@ -7778,7 +7773,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
if (!doms_cur)
doms_cur = &fallback_doms;
- cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+ *doms_cur = *cpu_map;
dattr_cur = NULL;
err = build_sched_domains(doms_cur);
register_sched_domain_sysctl();
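This hunk, and the two below it, replace cpus_andnot() calls that subtracted cpu_isolated_map from a source mask; with the isolated map gone, a plain copy is equivalent. A toy bitmask illustration of the semantic difference:

/* Toy illustration: cpus_andnot(dst, src, excl) computes dst = src & ~excl,
 * so with an empty exclusion mask it degenerates to dst = src. */
#include <assert.h>

int main(void)
{
	unsigned long online   = 0x0f;	/* CPUs 0-3 online */
	unsigned long isolated = 0x02;	/* CPU 1 isolated via isolcpus=1 */

	unsigned long doms_old = online & ~isolated;	/* before this patch */
	unsigned long doms_new = online;		/* after this patch */

	assert(doms_old == 0x0d);	/* CPU 1 excluded from the domains */
	assert(doms_new == 0x0f);	/* every online CPU participates */
	return 0;
}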
@@ -7859,7 +7854,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
if (doms_new == NULL) {
ndoms_new = 1;
doms_new = &fallback_doms;
- cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+ doms_new[0] = cpu_online_map;
dattr_new = NULL;
}

@@ -8029,7 +8024,7 @@ void __init sched_init_smp(void)
#endif
get_online_cpus();
arch_init_sched_domains(&cpu_online_map);
- cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
+ non_isolated_cpus = cpu_possible_map;
if (cpus_empty(non_isolated_cpus))
cpu_set(smp_processor_id(), non_isolated_cpus);
put_online_cpus();
@@ -8089,6 +8084,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
+ rt_rq->pushed = 0;
#endif

rt_rq->rt_time = 0;