author	Davidlohr Bueso <dave@stgolabs.net>	2019-01-03 15:28:48 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-01-04 13:13:48 -0800
commit	34ec35ad8f5f4624e8391dbb83afb4c791f027e3 (patch)
tree	78a75ab321b42e345f2bd36d7c4b340764208b65 /kernel
parent	3bb5f4ac55dd91d516e7e36b45c94bd57efbb068 (diff)
kernel/sched/: remove caller signal_pending branch predictions
This is already done for us internally by the signal machinery.

Link: http://lkml.kernel.org/r/20181116002713.8474-3-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
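For reference, a minimal sketch of what "already done for us internally" means: the signal helpers in include/linux/sched/signal.h carry the branch-prediction hint themselves, so a caller-side unlikely() adds nothing. The snippet below is a simplified illustration under that assumption, not the verbatim kernel source.

/*
 * Simplified sketch, not verbatim kernel source: the prediction hint
 * lives inside the signal machinery, once, for every caller.
 */
static inline int signal_pending(struct task_struct *p)
{
	/* unlikely() is applied here, so callers can drop their own. */
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

With the hint centralized like this, the three call sites below can test signal_pending_state() plainly, which is what the diff does.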
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	2
-rw-r--r--	kernel/sched/swait.c	2
-rw-r--r--	kernel/sched/wait.c	2
3 files changed, 3 insertions, 3 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f66920173370..17a954c9e153 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3416,7 +3416,7 @@ static void __sched notrace __schedule(bool preempt)
 
 	switch_count = &prev->nivcsw;
 	if (!preempt && prev->state) {
-		if (unlikely(signal_pending_state(prev->state, prev))) {
+		if (signal_pending_state(prev->state, prev)) {
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 66b59ac77c22..e83a3f8449f6 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -93,7 +93,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
 	long ret = 0;
 
 	raw_spin_lock_irqsave(&q->lock, flags);
-	if (unlikely(signal_pending_state(state, current))) {
+	if (signal_pending_state(state, current)) {
 		/*
 		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
 		 * must not see us.
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 5dd47f1103d1..6eb1f8efd221 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -264,7 +264,7 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
 	long ret = 0;
 
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (unlikely(signal_pending_state(state, current))) {
+	if (signal_pending_state(state, current)) {
 		/*
 		 * Exclusive waiter must not fail if it was selected by wakeup,
 		 * it should "consume" the condition we were waiting for.