author    Con Kolivas <kernel@kolivas.org>  2017-08-12 12:35:57 +1000
committer Con Kolivas <kernel@kolivas.org>  2017-08-12 12:46:01 +1000
commit    86e4df549dc3cbd04167b7120fbc43415c1c93ee (patch)
tree      3686d8288caaf2884df807c14aec78b1627c0349
parent    11da639caf53211f06c5f05ef4637a6bf551fa2e (diff)
MuQSS fixes
-rw-r--r--  kernel/livepatch/transition.c    8
-rw-r--r--  kernel/sched/MuQSS.c           105
-rw-r--r--  kernel/sched/MuQSS.h           101
3 files changed, 111 insertions, 103 deletions
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index b004a1fb6032..ac22ea5f8ae0 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -277,6 +277,12 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
return 0;
}
+#ifdef CONFIG_SCHED_MUQSS
+typedef unsigned long rq_flags_t;
+#else
+typedef struct rq_flags rq_flags_t;
+#endif
+
/*
* Try to safely switch a task to the target patch state. If it's currently
* running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
@@ -285,7 +291,7 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
static bool klp_try_switch_task(struct task_struct *task)
{
struct rq *rq;
- struct rq_flags flags;
+ rq_flags_t flags;
int ret;
bool success = false;
char err_buf[STACK_ERR_BUF_SIZE];
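
The transition.c hunk above bridges an API difference: mainline's task_rq_lock() fills in a struct rq_flags, while the MuQSS variant (moved into MuQSS.h further down in this commit) takes a plain unsigned long, so a conditional typedef lets klp_try_switch_task() declare its flags once and build against either scheduler. Below is a minimal standalone sketch of that pattern; it is not kernel code, and the names lock_flags_t, fake_lock() and USE_SIMPLE_FLAGS are illustrative stand-ins only.

#include <stdio.h>

struct rich_flags {                     /* stand-in for struct rq_flags */
	unsigned long irq_state;
	unsigned int cookie;
};

#ifdef USE_SIMPLE_FLAGS
typedef unsigned long lock_flags_t;     /* stand-in for the MuQSS case */
#else
typedef struct rich_flags lock_flags_t; /* stand-in for the mainline case */
#endif

/* Each configuration supplies a matching "lock" helper... */
static void fake_lock(lock_flags_t *flags)
{
#ifdef USE_SIMPLE_FLAGS
	*flags = 0x2a;                  /* pretend to save IRQ state */
#else
	flags->irq_state = 0x2a;
	flags->cookie = 1;
#endif
}

/* ...so this caller, like klp_try_switch_task(), is written only once. */
int main(void)
{
	lock_flags_t flags;

	fake_lock(&flags);
	printf("locked with a %zu-byte flags object\n", sizeof(flags));
	return 0;
}

Building once with and once without -DUSE_SIMPLE_FLAGS shows the same caller compiling against either flags representation, which is all the livepatch hunk needs.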
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 60ae7fdf017e..108f2d610ba4 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -167,7 +167,6 @@ struct rq *cpu_rq(int cpu)
{
return &per_cpu(runqueues, (cpu));
}
-#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
/*
@@ -308,20 +307,6 @@ static inline void update_clocks(struct rq *rq)
}
}
-static inline int task_current(struct rq *rq, struct task_struct *p)
-{
- return rq->curr == p;
-}
-
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
- return p->on_cpu;
-#else
- return task_current(rq, p);
-#endif
-}
-
static inline int task_on_rq_queued(struct task_struct *p)
{
return p->on_rq == TASK_ON_RQ_QUEUED;
@@ -332,24 +317,12 @@ static inline int task_on_rq_migrating(struct task_struct *p)
return p->on_rq == TASK_ON_RQ_MIGRATING;
}
-static inline void rq_lock(struct rq *rq)
- __acquires(rq->lock)
-{
- raw_spin_lock(&rq->lock);
-}
-
static inline int rq_trylock(struct rq *rq)
__acquires(rq->lock)
{
return raw_spin_trylock(&rq->lock);
}
-static inline void rq_unlock(struct rq *rq)
- __releases(rq->lock)
-{
- raw_spin_unlock(&rq->lock);
-}
-
/*
* Any time we have two runqueues locked we use that as an opportunity to
* synchronise niffies to the highest value as idle ticks may have artificially
@@ -469,78 +442,6 @@ static inline void unlock_rq(struct rq *rq)
do_raw_spin_unlock(&rq->lock);
}
-static inline void rq_lock_irq(struct rq *rq)
- __acquires(rq->lock)
-{
- raw_spin_lock_irq(&rq->lock);
-}
-
-static inline void rq_unlock_irq(struct rq *rq)
- __releases(rq->lock)
-{
- raw_spin_unlock_irq(&rq->lock);
-}
-
-static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
- __acquires(rq->lock)
-{
- raw_spin_lock_irqsave(&rq->lock, *flags);
-}
-
-static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
- __releases(rq->lock)
-{
- raw_spin_unlock_irqrestore(&rq->lock, *flags);
-}
-
-struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
- __acquires(p->pi_lock)
- __acquires(rq->lock)
-{
- struct rq *rq;
-
- while (42) {
- raw_spin_lock_irqsave(&p->pi_lock, *flags);
- rq = task_rq(p);
- raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p)))
- break;
- raw_spin_unlock(&rq->lock);
- raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
- }
- return rq;
-}
-
-void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
- __releases(rq->lock)
- __releases(p->pi_lock)
-{
- rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-static inline struct rq *__task_rq_lock(struct task_struct *p)
- __acquires(rq->lock)
-{
- struct rq *rq;
-
- lockdep_assert_held(&p->pi_lock);
-
- while (42) {
- rq = task_rq(p);
- raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p)))
- break;
- raw_spin_unlock(&rq->lock);
- }
- return rq;
-}
-
-static inline void __task_rq_unlock(struct rq *rq)
-{
- rq_unlock(rq);
-}
-
/*
* cmpxchg based fetch_or, macro so it works for different integer types
*/
@@ -4160,8 +4061,8 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
*/
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
+ int prio, oldprio;
struct rq *rq;
- int oldprio;
/* XXX used to be waiter->prio, not waiter->task->prio */
prio = __rt_effective_prio(pi_task, p->normal_prio);
@@ -4169,7 +4070,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
/*
* If nothing changed; bail early.
*/
- if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
+ if (p->pi_top_task == pi_task && prio == p->prio)
return;
rq = __task_rq_lock(p);
@@ -4189,7 +4090,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
/*
* For FIFO/RR we only need to set prio, if that matches we're done.
*/
- if (prio == p->prio && !dl_prio(prio))
+ if (prio == p->prio)
goto out_unlock;
/*
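
The rest of the commit is largely a straight move: the runqueue locking helpers (rq_lock*, task_rq_lock(), __task_rq_lock() and friends) leave MuQSS.c and become static inlines in MuQSS.h, so any file that includes the scheduler header can use them. Their core idiom is lock-and-revalidate: a task can migrate to another runqueue at any point before that runqueue's lock is held, so the helpers snapshot task_rq(p), take the lock, re-check, and retry on a mismatch. The following small standalone sketch shows the same idiom with pthreads; struct worker, struct queue and worker_queue_lock() are illustrative names, not part of the patch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
};

struct worker {
	_Atomic(struct queue *) q;      /* may be switched by a migration path */
};

/* Lock and return the worker's current queue, mirroring task_rq_lock(). */
struct queue *worker_queue_lock(struct worker *w)
{
	struct queue *q;

	for (;;) {
		q = atomic_load(&w->q);         /* snapshot the current queue */
		pthread_mutex_lock(&q->lock);
		if (q == atomic_load(&w->q))    /* unchanged while locking: done */
			return q;
		pthread_mutex_unlock(&q->lock); /* worker migrated: drop and retry */
	}
}

int main(void)
{
	struct queue q;
	struct worker w;
	struct queue *locked;

	pthread_mutex_init(&q.lock, NULL);
	atomic_init(&w.q, &q);

	locked = worker_queue_lock(&w);         /* returns with q.lock held */
	pthread_mutex_unlock(&locked->lock);
	printf("locked and released the worker's current queue\n");
	return 0;
}
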
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index 1b559fba7966..3e27e699ff82 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -23,6 +23,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/kernel_stat.h>
+#include <linux/tick.h>
#include <linux/slab.h>
#ifdef CONFIG_PARAVIRT
@@ -266,6 +267,106 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define raw_rq() raw_cpu_ptr(&runqueues)
#endif /* CONFIG_SMP */
+#define task_rq(p) cpu_rq(task_cpu(p))
+
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+ return rq->curr == p;
+}
+
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+ return p->on_cpu;
+#else
+ return task_current(rq, p);
+#endif
+}
+
+static inline void rq_lock(struct rq *rq)
+ __acquires(rq->lock)
+{
+ raw_spin_lock(&rq->lock);
+}
+
+static inline void rq_unlock(struct rq *rq)
+ __releases(rq->lock)
+{
+ raw_spin_unlock(&rq->lock);
+}
+
+static inline void rq_lock_irq(struct rq *rq)
+ __acquires(rq->lock)
+{
+ raw_spin_lock_irq(&rq->lock);
+}
+
+static inline void rq_unlock_irq(struct rq *rq)
+ __releases(rq->lock)
+{
+ raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
+ __acquires(rq->lock)
+{
+ raw_spin_lock_irqsave(&rq->lock, *flags);
+}
+
+static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
+ __releases(rq->lock)
+{
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(p->pi_lock)
+ __acquires(rq->lock)
+{
+ struct rq *rq;
+
+ while (42) {
+ raw_spin_lock_irqsave(&p->pi_lock, *flags);
+ rq = task_rq(p);
+ raw_spin_lock(&rq->lock);
+ if (likely(rq == task_rq(p)))
+ break;
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+ }
+ return rq;
+}
+
+static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+ __releases(rq->lock)
+ __releases(p->pi_lock)
+{
+ rq_unlock(rq);
+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+ __acquires(rq->lock)
+{
+ struct rq *rq;
+
+ lockdep_assert_held(&p->pi_lock);
+
+ while (42) {
+ rq = task_rq(p);
+ raw_spin_lock(&rq->lock);
+ if (likely(rq == task_rq(p)))
+ break;
+ raw_spin_unlock(&rq->lock);
+ }
+ return rq;
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+{
+ rq_unlock(rq);
+}
+
/*
* {de,en}queue flags: Most not used on MuQSS.
*