author     Xavier Boudet <x-boudet@ti.com>  2012-10-15 10:18:09 +0200
committer  Xavier Boudet <x-boudet@ti.com>  2012-10-15 10:18:09 +0200
commit     309bb6d5a4d7f8408e521dd443e52d4c796626c6 (patch)
tree       3ee17e79e8a9bb7b2476e915a617bf6e105779c0 /kernel
parent     4e1b3b8d7df3e4146c60ff79a313f36ba227924c (diff)
parent     e3f681ae691bd64d1dc06bab9ad4c98ad4effa82 (diff)
Merge remote-tracking branch 'upstream-stable/linux-3.4.y' into tilt-3.4
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c           |  3
-rw-r--r--  kernel/rcutree.c          |  4
-rw-r--r--  kernel/sched/core.c       | 40
-rw-r--r--  kernel/sched/stop_task.c  | 22
-rw-r--r--  kernel/sys.c              |  1
-rw-r--r--  kernel/workqueue.c        |  2

6 files changed, 66 insertions(+), 6 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 14f7070b4ba2..5fc1570e64c1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2065,6 +2065,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
  * (of no affect) on systems that are actively using CPU hotplug
  * but making no active use of cpusets.
  *
+ * The only exception to this is suspend/resume, where we don't
+ * modify cpusets at all.
+ *
  * This routine ensures that top_cpuset.cpus_allowed tracks
  * cpu_active_mask on each CPU hotplug (cpuhp) event.
  *
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d0c5baf1ab18..4eec66e0350d 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -295,7 +295,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-        return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
+        return *rdp->nxttail[RCU_DONE_TAIL +
+                             ACCESS_ONCE(rsp->completed) != rdp->completed] &&
+               !rcu_gp_in_progress(rsp);
 }
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5d0ccba4c453..5aadf630c8ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6937,34 +6937,66 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+static int num_cpus_frozen;     /* used to mark begin/end of suspend/resume */
+
 /*
  * Update cpusets according to cpu_active mask. If cpusets are
  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
  * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore it back to its original state upon resume anyway.
  */
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                              void *hcpu)
 {
-        switch (action & ~CPU_TASKS_FROZEN) {
+        switch (action) {
+        case CPU_ONLINE_FROZEN:
+        case CPU_DOWN_FAILED_FROZEN:
+
+                /*
+                 * num_cpus_frozen tracks how many CPUs are involved in suspend
+                 * resume sequence. As long as this is not the last online
+                 * operation in the resume sequence, just build a single sched
+                 * domain, ignoring cpusets.
+                 */
+                num_cpus_frozen--;
+                if (likely(num_cpus_frozen)) {
+                        partition_sched_domains(1, NULL, NULL);
+                        break;
+                }
+
+                /*
+                 * This is the last CPU online operation. So fall through and
+                 * restore the original sched domains by considering the
+                 * cpuset configurations.
+                 */
+
         case CPU_ONLINE:
         case CPU_DOWN_FAILED:
                 cpuset_update_active_cpus();
-                return NOTIFY_OK;
+                break;
         default:
                 return NOTIFY_DONE;
         }
+        return NOTIFY_OK;
 }
 
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                                void *hcpu)
 {
-        switch (action & ~CPU_TASKS_FROZEN) {
+        switch (action) {
         case CPU_DOWN_PREPARE:
                 cpuset_update_active_cpus();
-                return NOTIFY_OK;
+                break;
+        case CPU_DOWN_PREPARE_FROZEN:
+                num_cpus_frozen++;
+                partition_sched_domains(1, NULL, NULL);
+                break;
         default:
                 return NOTIFY_DONE;
         }
+        return NOTIFY_OK;
 }
 
 void __init sched_init_smp(void)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7b386e86fd23..da5eb5bed84a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
         struct task_struct *stop = rq->stop;
 
-        if (stop && stop->on_rq)
+        if (stop && stop->on_rq) {
+                stop->se.exec_start = rq->clock_task;
                 return stop;
+        }
 
         return NULL;
 }
@@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
+        struct task_struct *curr = rq->curr;
+        u64 delta_exec;
+
+        delta_exec = rq->clock_task - curr->se.exec_start;
+        if (unlikely((s64)delta_exec < 0))
+                delta_exec = 0;
+
+        schedstat_set(curr->se.statistics.exec_max,
+                      max(curr->se.statistics.exec_max, delta_exec));
+
+        curr->se.sum_exec_runtime += delta_exec;
+        account_group_exec_runtime(curr, delta_exec);
+
+        curr->se.exec_start = rq->clock_task;
+        cpuacct_charge(curr, delta_exec);
 }
 
 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 
 static void set_curr_task_stop(struct rq *rq)
 {
+        struct task_struct *stop = rq->stop;
+
+        stop->se.exec_start = rq->clock_task;
 }
 
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
diff --git a/kernel/sys.c b/kernel/sys.c
index e7006eb6c1e4..898a84c9df5b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
 void kernel_restart(char *cmd)
 {
         kernel_restart_prepare(cmd);
+        disable_nonboot_cpus();
         if (!cmd)
                 printk(KERN_EMERG "Restarting system.\n");
         else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7584322349c1..56f793d044a5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1864,7 +1864,9 @@ __acquires(&gcwq->lock)
 
         spin_unlock_irq(&gcwq->lock);
 
+        smp_wmb();      /* paired with test_and_set_bit(PENDING) */
         work_clear_pending(work);
+
         lock_map_acquire_read(&cwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
         trace_workqueue_execute_start(work);
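
The rcutree.c change leans on C's guarantee that a comparison evaluates to exactly 0 or 1, so `ACCESS_ONCE(rsp->completed) != rdp->completed` can offset the nxttail[] index by one segment whenever a grace period has completed that this CPU has not yet caught up with. A tiny standalone illustration of that idiom follows; the segment names and values are made up for the example, not the kernel's:

#include <stdio.h>

int main(void)
{
        /* made-up segment names; the kernel uses RCU_DONE_TAIL etc. */
        const char *segment[] = { "DONE", "WAIT" };
        unsigned long global_completed = 5;
        unsigned long snap_completed = 4;   /* this CPU's stale snapshot */

        /* (a != b) is 0 or 1, so it can be added straight into an index */
        printf("check segment %s\n",
               segment[0 + (global_completed != snap_completed)]);
        return 0;
}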
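
The suspend/resume handling in the sched/core.c hunk hinges on num_cpus_frozen: every _FROZEN offline increments it and collapses the scheduler to a single domain, and only the final _FROZEN online rebuilds domains from the cpuset configuration. Below is a minimal user-space C model of that counting logic; the enum values and the two stub functions are illustrative stand-ins, not the kernel's real notifier API:

#include <stdio.h>

enum hotplug_event {
        CPU_ONLINE,
        CPU_ONLINE_FROZEN,        /* online as part of resume */
        CPU_DOWN_PREPARE,
        CPU_DOWN_PREPARE_FROZEN,  /* offline as part of suspend */
};

static int num_cpus_frozen;       /* marks begin/end of suspend/resume */

static void partition_single_domain(void) { puts("single sched domain"); }
static void rebuild_from_cpusets(void)    { puts("rebuild domains from cpusets"); }

static void cpu_event(enum hotplug_event e)
{
        switch (e) {
        case CPU_DOWN_PREPARE_FROZEN:
                /* suspend path: count the CPU, don't touch cpusets */
                num_cpus_frozen++;
                partition_single_domain();
                break;
        case CPU_ONLINE_FROZEN:
                /* resume path: only the last CPU restores cpuset domains */
                if (--num_cpus_frozen) {
                        partition_single_domain();
                        break;
                }
                /* fall through */
        case CPU_ONLINE:
        case CPU_DOWN_PREPARE:
                rebuild_from_cpusets();
                break;
        }
}

int main(void)
{
        /* suspend three non-boot CPUs, then resume them */
        for (int i = 0; i < 3; i++)
                cpu_event(CPU_DOWN_PREPARE_FROZEN);
        for (int i = 0; i < 3; i++)
                cpu_event(CPU_ONLINE_FROZEN);  /* last call rebuilds cpusets */
        return 0;
}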
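
The stop_task.c changes follow the same exec-start/delta pattern the other scheduling classes use: stamp se.exec_start when the task is picked or becomes current, then account rq->clock_task minus exec_start when it is switched out, clamping negative deltas. A simplified user-space sketch of that accounting, with a stand-in struct in place of the kernel's sched_entity:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the few sched_entity fields the hunk touches. */
struct entity {
        uint64_t exec_start;        /* stamped when the task starts running */
        uint64_t sum_exec_runtime;  /* accumulated runtime */
        uint64_t exec_max;          /* longest single slice seen */
};

/* Mirrors the logic added to put_prev_task_stop(): account the
 * elapsed slice and restart the accounting window. */
static void account_slice(struct entity *se, uint64_t clock_task)
{
        int64_t delta_exec = clock_task - se->exec_start;

        if (delta_exec < 0)         /* guard against a stale/backward stamp */
                delta_exec = 0;

        if ((uint64_t)delta_exec > se->exec_max)
                se->exec_max = delta_exec;

        se->sum_exec_runtime += delta_exec;
        se->exec_start = clock_task;
}

int main(void)
{
        struct entity se = { .exec_start = 100 };

        account_slice(&se, 250);    /* ran for 150 time units */
        printf("total %llu, max %llu\n",
               (unsigned long long)se.sum_exec_runtime,
               (unsigned long long)se.exec_max);
        return 0;
}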
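
The workqueue.c hunk inserts a write barrier so that everything the worker wrote while the item was pending is ordered before PENDING is cleared; the queueing side pairs with it through the barrier implied by test_and_set_bit(). A generic C11-atomics sketch of that claim/release pairing follows; the names and layout are illustrative, not the kernel's work_struct:

#include <stdatomic.h>
#include <stdbool.h>

struct work {
        atomic_bool pending;   /* ownership flag, like WORK_STRUCT_PENDING */
        int payload;           /* data protected by the claim protocol */
};

/* Release side: all prior writes must be visible before the flag clears
 * (the role the smp_wmb() plays before work_clear_pending()). */
void finish_work(struct work *w)
{
        w->payload = 0;        /* the worker's last writes */
        atomic_store_explicit(&w->pending, false, memory_order_release);
}

/* Claim side: an atomic read-modify-write takes ownership, pairing with
 * the release above (the role test_and_set_bit(PENDING) plays). */
bool try_queue(struct work *w, int value)
{
        if (atomic_exchange_explicit(&w->pending, true, memory_order_acquire))
                return false;  /* already pending, someone else owns it */
        w->payload = value;    /* safe: we own the item now */
        return true;
}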