author     ckolivas <kernel@kolivas.org>      2016-10-26 12:45:38 +1100
committer  Con Kolivas <kernel@kolivas.org>   2016-10-27 14:00:47 +1100
commit     10b6fffc3aa173a3b86d4f359e17529085cddefe (patch)
tree       c1335580525e5627955eee8b05a0416ace374495 /kernel/sched
parent     d8bca0f15e0c2f3dbcc7910b789bd86c889f4390 (diff)
Add high res timeslice expiry when available, using high resolution timers to bring the resolution of timeslice expiry down to 100us. This makes low latency possible with low Hz values (e.g. 100Hz) while retaining the throughput benefit that low Hz values provide.
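A rough sketch of the arithmetic behind that claim (illustrative numbers only, assuming HZ=100 and the conversion macros shown in the first hunk below):

	JIFFY_US      = 1048576 / 100 ≈ 10486 us   (one scheduler tick, ~10 ms)
	HALF_JIFFY_US = 1048576 / 200 ≈  5243 us   (the old dither window, ~5 ms)
	RESCHED_US    = 100 us                     (granularity of the new hrtimer expiry)

Without high resolution expiry a task at 100Hz can be rescheduled up to roughly half a jiffy (~5 ms) before its slice is really used up; with it, expiry is delivered within about 100us of the true end of the slice.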
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/MuQSS.c   104
-rw-r--r--   kernel/sched/MuQSS.h     4
2 files changed, 102 insertions, 6 deletions
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 920d9f8c7c9a..f1301160fda6 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -123,6 +123,7 @@
*/
#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
#define JIFFY_NS (1073741824 / HZ)
+#define JIFFY_US (1048576 / HZ)
#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
#define HALF_JIFFY_NS (1073741824 / HZ / 2)
#define HALF_JIFFY_US (1048576 / HZ / 2)
@@ -130,6 +131,7 @@
#define MS_TO_US(TIME) ((TIME) << 10)
#define NS_TO_MS(TIME) ((TIME) >> 20)
#define NS_TO_US(TIME) ((TIME) >> 10)
+#define US_TO_NS(TIME) ((TIME) << 10)
#define RESCHED_US (100) /* Reschedule if less than this many μs left */
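A quick observation on those constants (an editorial note, not part of the patch): the conversion macros deliberately use powers of two rather than exact decimal factors, e.g.

	US_TO_NS(100) = 100 << 10    = 102400 ns          (an exact conversion would give 100000 ns)
	JIFFY_US      = 1048576 / HZ ≈ 10486 at HZ=100    (an exact jiffy at 100Hz is 10000 us)

so a "microsecond" in this accounting runs a few percent long. That lets the runtime conversions compile down to shifts, and the small error is irrelevant at timeslice granularity.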
@@ -171,6 +173,8 @@ static inline int timeslice(void)
return MS_TO_US(rr_interval);
}
+static bool sched_smp_initialized __read_mostly;
+
/*
* The global runqueue data that all CPUs work off. Contains both atomic
* variables and a cpu bitmap set atomically.
@@ -1831,8 +1835,6 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
}
#ifdef CONFIG_SMP
-static bool sched_smp_initialized __read_mostly;
-
void sched_ttwu_pending(void)
{
struct rq *rq = this_rq();
@@ -3440,6 +3442,84 @@ void account_idle_ticks(unsigned long ticks)
}
#endif
+#ifdef CONFIG_HIGH_RES_TIMERS
+static inline int hrexpiry_enabled(struct rq *rq)
+{
+ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
+ return 0;
+ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
+}
+
+/*
+ * Use HR-timers to deliver accurate preemption points.
+ */
+static void hrexpiry_clear(struct rq *rq)
+{
+ if (!hrexpiry_enabled(rq))
+ return;
+ if (hrtimer_active(&rq->hrexpiry_timer))
+ hrtimer_cancel(&rq->hrexpiry_timer);
+}
+
+/*
+ * High-resolution time_slice expiry.
+ * Runs from hardirq context with interrupts disabled.
+ */
+static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
+{
+ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
+ struct task_struct *p = rq->curr;
+
+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+
+ /*
+ * We're doing this without the runqueue lock but this should always
+ * be run on the local CPU. The time slice should run out in __schedule,
+ * but we zero it here in case niffies is slightly short of the expiry.
+ */
+ p->time_slice = 0;
+ __set_tsk_resched(p);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Called to set the hrexpiry timer state.
+ *
+ * Called with irqs disabled from the local CPU only.
+ */
+static void hrexpiry_start(struct rq *rq, u64 delay)
+{
+ if (!hrexpiry_enabled(rq))
+ return;
+
+ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void init_rq_hrexpiry(struct rq *rq)
+{
+ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrexpiry_timer.function = hrexpiry;
+}
+
+static inline int rq_dither(struct rq *rq)
+{
+ if (!hrexpiry_enabled(rq))
+ return HALF_JIFFY_US;
+ return 0;
+}
+#else /* CONFIG_HIGH_RES_TIMERS */
+static inline void init_rq_hrexpiry(struct rq *rq)
+{
+}
+
+static inline int rq_dither(struct rq *rq)
+{
+ return HALF_JIFFY_US;
+}
+#endif /* CONFIG_HIGH_RES_TIMERS */
+
/*
* Functions to test for when SCHED_ISO tasks have used their allocated
* quota as real time scheduling and convert them back to SCHED_NORMAL. All
@@ -3517,6 +3597,8 @@ static void task_running_tick(struct rq *rq)
* allowed to run into the 2nd half of the next tick if they will
* run out of time slice in the interim. Otherwise, if they have
* less than RESCHED_US μs of time slice left they will be rescheduled.
+ * Dither is used as a backup for when hrexpiry is disabled or high res
+ * timers are not configured in.
*/
if (p->time_slice - rq->dither >= RESCHED_US)
return;
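To make the dither check above concrete (illustrative numbers only, again assuming HZ=100): with high resolution expiry unavailable, rq->dither is HALF_JIFFY_US ≈ 5243, so a task holding 5300us of slice computes 5300 - 5243 = 57 < RESCHED_US and gets marked for rescheduling roughly 5 ms early. With hrexpiry active, dither is 0, the same task keeps running, and the per-runqueue hrtimer armed in set_rq_task() delivers the preemption at the actual end of the slice, to within the 100us granularity the commit message targets.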
@@ -3828,6 +3910,17 @@ static inline void schedule_debug(struct task_struct *prev)
*/
static inline void set_rq_task(struct rq *rq, struct task_struct *p)
{
+#ifdef CONFIG_HIGH_RES_TIMERS
+ if (p == rq->idle || p->policy == SCHED_FIFO)
+ hrexpiry_clear(rq);
+ else
+ hrexpiry_start(rq, US_TO_NS(p->time_slice));
+#endif /* CONFIG_HIGH_RES_TIMERS */
+ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
+ rq->dither = 0;
+ else
+ rq->dither = rq_dither(rq);
+
rq->rq_deadline = p->deadline;
rq->rq_prio = p->prio;
#ifdef CONFIG_SMT_NICE
@@ -4012,10 +4105,6 @@ static void __sched notrace __schedule(bool preempt)
update_clocks(rq);
niffies = rq->niffies;
update_cpu_clock_switch(rq, prev);
- if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
- rq->dither = 0;
- else
- rq->dither = HALF_JIFFY_US;
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
@@ -7485,6 +7574,7 @@ int sched_cpu_dying(unsigned int cpu)
}
bind_zero(cpu);
double_rq_unlock(rq, cpu_rq(0));
+ hrexpiry_clear(rq);
local_irq_restore(flags);
return 0;
@@ -7655,6 +7745,7 @@ void __init sched_init_smp(void)
#else
void __init sched_init_smp(void)
{
+ sched_smp_initialized = true;
}
#endif /* CONFIG_SMP */
@@ -7741,6 +7832,7 @@ void __init sched_init(void)
rq->cpu = i;
rq_attach_root(rq, &def_root_domain);
#endif
+ init_rq_hrexpiry(rq);
atomic_set(&rq->nr_iowait, 0);
}
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index 4e3115dac4a2..858c881ec832 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -85,6 +85,10 @@ struct rq {
int iso_ticks;
bool iso_refractory;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ struct hrtimer hrexpiry_timer;
+#endif
+
#ifdef CONFIG_SCHEDSTATS
/* latency stats */