author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 16:23:15 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 16:23:15 +0200
commit    faafcba3b5e15999cf75d5c5a513ac8e47e2545f
tree      47d58d1c00e650e820506c91eb9a41268756bdda /fs
parent    13ead805c5a14b0e7ecd34f61404a5bfba655895
parent    f10e00f4bf360c36edbe6bf18a6c75b171cbe012
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Optimized support for Intel "Cluster-on-Die" (CoD) topologies (Dave Hansen)

   - Various sched/idle refinements for better idle handling (Nicolas Pitre, Daniel Lezcano, Chuansheng Liu, Vincent Guittot)

   - sched/numa updates and optimizations (Rik van Riel)

   - sysbench speedup (Vincent Guittot)

   - capacity calculation cleanups/refactoring (Vincent Guittot)

   - Various cleanups to thread group iteration (Oleg Nesterov)

   - Double-rq-lock removal optimization and various refactorings (Kirill Tkhai)

   - various sched/deadline fixes

  ... and lots of other changes"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
  sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
  sched/fair: Delete resched_cpu() from idle_balance()
  sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
  sched: Improve sysbench performance by fixing spurious active migration
  sched/x86: Fix up typo in topology detection
  x86, sched: Add new topology for multi-NUMA-node CPUs
  sched/rt: Use resched_curr() in task_tick_rt()
  sched: Use rq->rd in sched_setaffinity() under RCU read lock
  sched: cleanup: Rename 'out_unlock' to 'out_free_new_mask'
  sched: Use dl_bw_of() under RCU read lock
  sched/fair: Remove duplicate code from can_migrate_task()
  sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
  sched: print_rq(): Don't use tasklist_lock
  sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
  sched: Fix the task-group check in tg_has_rt_tasks()
  sched/fair: Leverage the idle state info when choosing the "idlest" cpu
  sched: Let the scheduler see CPU idle states
  sched/deadline: Fix inter- exclusive cpusets migrations
  sched/deadline: Clear dl_entity params when setscheduling to different class
  sched/numa: Kill the wrong/dead TASK_DEAD check in task_numa_fault()
  ...
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/vlocation.c              | 1 -
-rw-r--r--  fs/jfs/jfs_logmgr.c             | 2 --
-rw-r--r--  fs/jfs/jfs_txnmgr.c             | 3 ---
-rw-r--r--  fs/nfs/blocklayout/rpc_pipefs.c | 1 -
-rw-r--r--  fs/nfsd/nfs4recover.c           | 1 -
5 files changed, 0 insertions, 8 deletions
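
Every hunk below removes the same thing: a __set_current_state(TASK_RUNNING) (or, in nfsd, set_current_state(TASK_RUNNING)) placed immediately after schedule(). The reset is redundant because schedule() only returns once the task has been woken, and the wakeup path has already put the task back into TASK_RUNNING. The following is a rough sketch of the wait-loop idiom these call sites follow; my_wq, my_lock, my_condition and the function names are made-up names for illustration, not identifiers from the files changed here:

/*
 * Sketch only: my_wq, my_lock and my_condition are illustrative names,
 * not taken from any of the files touched in this diff.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static DEFINE_SPINLOCK(my_lock);
static int my_condition;

static void my_wait_for_condition(void)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock(&my_lock);
	while (!my_condition) {
		add_wait_queue(&my_wq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&my_lock);

		schedule();
		/*
		 * The line removed throughout this diff would sit here:
		 *	__set_current_state(TASK_RUNNING);
		 * It is a no-op: schedule() returns only after a wakeup,
		 * and the waker has already set the task to TASK_RUNNING.
		 */

		spin_lock(&my_lock);
		remove_wait_queue(&my_wq, &wait);
	}
	spin_unlock(&my_lock);
}

static void my_signal_condition(void)
{
	spin_lock(&my_lock);
	my_condition = 1;
	spin_unlock(&my_lock);
	wake_up(&my_wq);	/* try_to_wake_up() sets the sleeper to TASK_RUNNING */
}

The waker side is what makes the removed line unnecessary: wake_up() (or wake_up_process() from a timer, or signal delivery for TASK_INTERRUPTIBLE sleeps) moves the sleeping task back to TASK_RUNNING before schedule() can return.
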
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index b6df2e83809f..52976785a32c 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -130,7 +130,6 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
/* second+ BUSY - sleep a little bit */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
- __set_current_state(TASK_RUNNING);
}
continue;
}
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 0acddf60af55..bc462dcd7a40 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1585,7 +1585,6 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
set_current_state(TASK_UNINTERRUPTIBLE);
LOGGC_UNLOCK(log);
schedule();
- __set_current_state(TASK_RUNNING);
LOGGC_LOCK(log);
remove_wait_queue(&target->gcwait, &__wait);
}
@@ -2359,7 +2358,6 @@ int jfsIOWait(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&log_redrive_lock);
schedule();
- __set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 564c4f279ac6..d595856453b2 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -136,7 +136,6 @@ static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
set_current_state(TASK_UNINTERRUPTIBLE);
TXN_UNLOCK();
io_schedule();
- __set_current_state(TASK_RUNNING);
remove_wait_queue(event, &wait);
}
@@ -2808,7 +2807,6 @@ int jfs_lazycommit(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
LAZY_UNLOCK(flags);
schedule();
- __set_current_state(TASK_RUNNING);
remove_wait_queue(&jfs_commit_thread_wait, &wq);
}
} while (!kthread_should_stop());
@@ -2996,7 +2994,6 @@ int jfs_sync(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
TXN_UNLOCK();
schedule();
- __set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c
index 8d04bda2bd2e..e966c023b1b7 100644
--- a/fs/nfs/blocklayout/rpc_pipefs.c
+++ b/fs/nfs/blocklayout/rpc_pipefs.c
@@ -92,7 +92,6 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
- __set_current_state(TASK_RUNNING);
remove_wait_queue(&nn->bl_wq, &wq);
if (reply->status != BL_DEVICE_REQUEST_PROC) {
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ea95a2bc21b5..a25490ae6c62 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -675,7 +675,6 @@ __cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
}
schedule();
- set_current_state(TASK_RUNNING);
if (msg.errno < 0)
ret = msg.errno;
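
Note that this last hunk drops set_current_state(TASK_RUNNING) rather than the double-underscore form used elsewhere; the two differ only in whether a memory barrier is implied, and neither is needed once schedule() has returned with the task already in TASK_RUNNING.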