author | Stephen Rothwell <sfr@canb.auug.org.au> | 2008-06-04 13:49:24 +1000
---|---|---
committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2008-06-04 13:49:24 +1000
commit | 893b0d6ad52ca64f1cb8ece21528c2592c1f475c (patch) |
tree | c18a1d0219ea0ec9b3e4bff6a779d8c690c351e8 /kernel |
parent | 1662bec21c72e4e7071f3eafa99ae56533f8903b (diff) |
parent | d20999dcec4b97c7241cb3bf16358e3efdd28b8b (diff) |
Merge branch 'quilt/rr'
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu.c | 13
-rw-r--r-- | kernel/kthread.c | 29
-rw-r--r-- | kernel/mutex.c | 4
-rw-r--r-- | kernel/printk.c | 4
-rw-r--r-- | kernel/semaphore.c | 17
-rw-r--r-- | kernel/stop_machine.c | 278
6 files changed, 143 insertions, 202 deletions
```diff
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a40b4f263149..787d3f4f7a63 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -192,7 +192,6 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -226,19 +225,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed_ptr(current, &tmp);
 
-	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
+	err = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
 
-	if (IS_ERR(p) || cpu_online(cpu)) {
+	if (err || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		if (IS_ERR(p)) {
-			err = PTR_ERR(p);
-			goto out_allowed;
-		}
-		goto out_thread;
+		goto out_allowed;
 	}
 
 	/* Wait for it to sleep (leaving idle task). */
@@ -255,8 +250,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	check_for_tasks(cpu);
 
-out_thread:
-	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bd1b9ea024e1..9eb21645765f 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -111,29 +111,10 @@ static void create_kthread(struct kthread_create_info *create)
 	complete(&create->done);
 }
 
-/**
- * kthread_create - create a kthread.
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @namefmt: printf-style name for the thread.
- *
- * Description: This helper function creates and names a kernel
- * thread.  The thread will be stopped: use wake_up_process() to start
- * it.  See also kthread_run(), kthread_create_on_cpu().
- *
- * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which noone will call kthread_stop(), or
- * return when 'kthread_should_stop()' is true (which means
- * kthread_stop() has been called).  The return value should be zero
- * or a negative error number; it will be passed to kthread_stop().
- *
- * Returns a task_struct or ERR_PTR(-ENOMEM).
- */
-struct task_struct *kthread_create(int (*threadfn)(void *data),
-				   void *data,
-				   const char namefmt[],
-				   ...)
+struct task_struct *__kthread_create(int (*threadfn)(void *data),
+				     void *data,
+				     const char namefmt[],
+				     ...)
 {
 	struct kthread_create_info create;
 
@@ -158,7 +139,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	}
 	return create.result;
 }
-EXPORT_SYMBOL(kthread_create);
+EXPORT_SYMBOL(__kthread_create);
 
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d046a345d365..9e98f985c5bf 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -373,8 +373,8 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
- * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * NOTE: this function follows the spin_trylock()/down_try() convention,
+ * so it is negated to the old down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
diff --git a/kernel/printk.c b/kernel/printk.c
index 4aa44f163563..c5b4aea992a6 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -989,7 +989,7 @@ EXPORT_SYMBOL(acquire_console_sem);
 
 int try_acquire_console_sem(void)
 {
-	if (down_trylock(&console_sem))
+	if (!down_try(&console_sem))
 		return -1;
 	console_locked = 1;
 	console_may_schedule = 0;
@@ -1088,7 +1088,7 @@ void console_unblank(void)
 	 * oops_in_progress is set to 1..
 	 */
 	if (oops_in_progress) {
-		if (down_trylock(&console_sem) != 0)
+		if (!down_try(&console_sem))
 			return;
 	} else
 		acquire_console_sem();
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 1a064adab658..bbab232ee185 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -14,7 +14,7 @@
  * Some notes on the implementation:
  *
  * The spinlock controls access to the other members of the semaphore.
- * down_trylock() and up() can be called from interrupt context, so we
+ * down_try() and up() can be called from interrupt context, so we
  * have to disable interrupts when taking the lock.  It turns out various
  * parts of the kernel expect to be able to use down() on a semaphore in
  * interrupt context when they know it will succeed, so we have to use
@@ -116,19 +116,18 @@ int down_killable(struct semaphore *sem)
 EXPORT_SYMBOL(down_killable);
 
 /**
- * down_trylock - try to acquire the semaphore, without waiting
+ * down_try - try to acquire the semaphore, without waiting
  * @sem: the semaphore to be acquired
  *
- * Try to acquire the semaphore atomically.  Returns 0 if the mutex has
- * been acquired successfully or 1 if it it cannot be acquired.
+ * Try to acquire the semaphore atomically.  Returns true if the mutex has
+ * been acquired successfully or 0 if it it cannot be acquired.
  *
- * NOTE: This return value is inverted from both spin_trylock and
- * mutex_trylock!  Be careful about this when converting code.
+ * NOTE: This replaces down_trylock() which returned the reverse.
  *
  * Unlike mutex_trylock, this function can be used from interrupt context,
  * and the semaphore can be released by any task or interrupt.
  */
-int down_trylock(struct semaphore *sem)
+int down_try(struct semaphore *sem)
 {
 	unsigned long flags;
 	int count;
@@ -139,9 +138,9 @@
 		sem->count = count;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
-	return (count < 0);
+	return (count >= 0);
 }
-EXPORT_SYMBOL(down_trylock);
+EXPORT_SYMBOL(down_try);
 
 /**
  * down_timeout - acquire the semaphore within a specified time
```
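The printk.c and semaphore.c hunks above carry the key semantic flip of this series: down_try() returns non-zero on success, where the old down_trylock() returned 0. A minimal sketch of a caller conversion (not part of the patch; `example_sem` and `example_try` are invented names):

```c
#include <linux/errno.h>
#include <linux/semaphore.h>

static struct semaphore example_sem;	/* hypothetical; sema_init(&example_sem, 1) at init */

static int example_try(void)
{
	/* Old convention was:  if (down_trylock(&example_sem)) return -EBUSY; */
	if (!down_try(&example_sem))
		return -EBUSY;

	/* ... work guarded by example_sem ... */

	up(&example_sem);
	return 0;
}
```

Renaming the function rather than merely re-documenting it means any caller missed in the conversion fails to build, instead of silently testing the inverted value.

```diff
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7350bbfb076..36e166def7c7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -13,203 +13,171 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
-/* Since we effect priority and affinity (both of which are visible
- * to, and settable by outside processes) we do indirection via a
- * kthread. */
-
-/* Thread to stop each CPU in user context. */
+/* This controls the threads on each CPU. */
 enum stopmachine_state {
-	STOPMACHINE_WAIT,
-	STOPMACHINE_PREPARE,
+	/* Dummy starting state for thread. */
+	STOPMACHINE_NONE,
+	/* Disable interrupts. */
 	STOPMACHINE_DISABLE_IRQ,
+	/* Run the function */
+	STOPMACHINE_RUN,
+	/* Exit */
 	STOPMACHINE_EXIT,
+	/* Everyone exited. */
+	STOPMACHINE_COMPLETE,
 };
+static enum stopmachine_state state;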
 
-static enum stopmachine_state stopmachine_state;
-static unsigned int stopmachine_num_threads;
-static atomic_t stopmachine_thread_ack;
+struct stop_machine_data {
+	int (*fn)(void *);
+	void *data;
+	int fnret;
+};
+
+/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+static unsigned int num_threads;
+static atomic_t thread_ack;
+static struct completion finished;
 
-static int stopmachine(void *cpu)
+static void set_state(enum stopmachine_state newstate)
 {
-	int irqs_disabled = 0;
-	int prepared = 0;
+	/* Reset ack counter. */
+	atomic_set(&thread_ack, num_threads);
+	smp_wmb();
+	state = newstate;
+}
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+/* Last one to ack a state moves to the next state. */
+static void ack_state(void)
+{
+	if (atomic_dec_and_test(&thread_ack)) {
+		set_state(state + 1);
+		if (state == STOPMACHINE_COMPLETE)
+			complete(&finished);
+	}
+}
 
-	/* Ack: we are alive */
-	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-	atomic_inc(&stopmachine_thread_ack);
+/* This is the actual thread which stops the CPU.  It exits by itself rather
+ * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
+static int stop_cpu(struct stop_machine_data *smdata)
+{
+	enum stopmachine_state curstate = STOPMACHINE_NONE;
+	int uninitialized_var(ret);
 
 	/* Simple state machine */
-	while (stopmachine_state != STOPMACHINE_EXIT) {
-		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
-		    && !irqs_disabled) {
-			local_irq_disable();
-			hard_irq_disable();
-			irqs_disabled = 1;
-			/* Ack: irqs disabled. */
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		} else if (stopmachine_state == STOPMACHINE_PREPARE
-			   && !prepared) {
-			/* Everyone is in place, hold CPU. */
-			preempt_disable();
-			prepared = 1;
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		}
-		/* Yield in first stage: migration threads need to
-		 * help our sisters onto their CPUs. */
-		if (!prepared && !irqs_disabled)
-			yield();
+	do {
+		/* Chill out and ensure we re-read stopmachine_state. */
 		cpu_relax();
-	}
-
-	/* Ack: we are exiting. */
-	smp_mb(); /* Must read state first. */
-	atomic_inc(&stopmachine_thread_ack);
-
-	if (irqs_disabled)
-		local_irq_enable();
-	if (prepared)
-		preempt_enable();
+		if (state != curstate) {
+			curstate = state;
+			switch (curstate) {
+			case STOPMACHINE_DISABLE_IRQ:
+				local_irq_disable();
+				hard_irq_disable();
+				break;
+			case STOPMACHINE_RUN:
+				/* |= allows error detection if functions on
+				 * multiple CPUs. */
+				smdata->fnret |= smdata->fn(smdata->data);
+				break;
+			default:
+				break;
+			}
+			ack_state();
+		}
+	} while (curstate < STOPMACHINE_EXIT);
 
-	return 0;
+	local_irq_enable();
+	do_exit(0);
 }
 
-/* Change the thread state */
-static void stopmachine_set_state(enum stopmachine_state state)
+/* Callback for CPUs which aren't supposed to do anything. */
+static int chill(void *unused)
 {
-	atomic_set(&stopmachine_thread_ack, 0);
-	smp_wmb();
-	stopmachine_state = state;
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
-		cpu_relax();
+	return 0;
 }
 
-static int stop_machine(void)
+int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 {
-	int i, ret = 0;
-
-	atomic_set(&stopmachine_thread_ack, 0);
-	stopmachine_num_threads = 0;
-	stopmachine_state = STOPMACHINE_WAIT;
-
-	for_each_online_cpu(i) {
-		if (i == raw_smp_processor_id())
-			continue;
-		ret = kernel_thread(stopmachine, (void *)(long)i,
-				    CLONE_KERNEL);
-		if (ret < 0)
-			break;
-		stopmachine_num_threads++;
-	}
-
-	/* Wait for them all to come to life. */
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
-		yield();
-		cpu_relax();
-	}
-
-	/* If some failed, kill them all. */
-	if (ret < 0) {
-		stopmachine_set_state(STOPMACHINE_EXIT);
-		return ret;
-	}
+	int i, err;
+	struct stop_machine_data active, idle;
+	struct task_struct **threads;
 
-	/* Now they are all started, make them hold the CPUs, ready. */
-	preempt_disable();
-	stopmachine_set_state(STOPMACHINE_PREPARE);
+	active.fn = fn;
+	active.data = data;
+	active.fnret = 0;
+	idle.fn = chill;
+	idle.data = NULL;
 
-	/* Make them disable irqs. */
-	local_irq_disable();
-	hard_irq_disable();
-	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
+	/* If they don't care which cpu fn runs on, just pick one. */
+	if (cpu == NR_CPUS)
+		cpu = any_online_cpu(cpu_online_map);
 
-	return 0;
-}
+	/* This could be too big for stack on large machines. */
+	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
 
-static void restart_machine(void)
-{
-	stopmachine_set_state(STOPMACHINE_EXIT);
-	local_irq_enable();
-	preempt_enable_no_resched();
-}
+	/* Set up initial state. */
+	init_completion(&finished);
+	num_threads = num_online_cpus();
+	set_state(STOPMACHINE_DISABLE_IRQ);
 
-struct stop_machine_data {
-	int (*fn)(void *);
-	void *data;
-	struct completion done;
-};
+	for_each_online_cpu(i) {
+		struct stop_machine_data *smdata;
+		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
-static int do_stop(void *_smdata)
-{
-	struct stop_machine_data *smdata = _smdata;
-	int ret;
+		if (cpu == ALL_CPUS || i == cpu)
+			smdata = &active;
+		else
+			smdata = &idle;
 
-	ret = stop_machine();
-	if (ret == 0) {
-		ret = smdata->fn(smdata->data);
-		restart_machine();
-	}
+		threads[i] = kthread_create(stop_cpu, smdata, "kstop%u", i);
+		if (IS_ERR(threads[i])) {
+			err = PTR_ERR(threads[i]);
+			threads[i] = NULL;
+			goto kill_threads;
+		}
 
-	/* We're done: you can kthread_stop us now */
-	complete(&smdata->done);
+		/* Place it onto correct cpu. */
+		kthread_bind(threads[i], i);
 
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
+		/* Make it highest prio. */
+		if (sched_setscheduler(threads[i], SCHED_FIFO, &param) != 0)
+			BUG();
 	}
-	__set_current_state(TASK_RUNNING);
-	return ret;
-}
-
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-				       unsigned int cpu)
-{
-	static DEFINE_MUTEX(stopmachine_mutex);
-	struct stop_machine_data smdata;
-	struct task_struct *p;
 
-	smdata.fn = fn;
-	smdata.data = data;
-	init_completion(&smdata.done);
+	/* We've created all the threads.  Wake them all: hold this CPU so one
+	 * doesn't hit this CPU until we're ready. */
+	cpu = get_cpu();
+	for_each_online_cpu(i)
+		wake_up_process(threads[i]);
 
-	mutex_lock(&stopmachine_mutex);
+	/* This will release the thread on our CPU. */
+	put_cpu();
+	wait_for_completion(&finished);
 
-	/* If they don't care which CPU fn runs on, bind to any online one. */
-	if (cpu == NR_CPUS)
-		cpu = raw_smp_processor_id();
+	kfree(threads);
 
-	p = kthread_create(do_stop, &smdata, "kstopmachine");
-	if (!IS_ERR(p)) {
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	return active.fnret;
 
-		/* One high-prio thread per cpu.  We'll do this one. */
-		sched_setscheduler(p, SCHED_FIFO, &param);
-		kthread_bind(p, cpu);
-		wake_up_process(p);
-		wait_for_completion(&smdata.done);
-	}
-	mutex_unlock(&stopmachine_mutex);
-	return p;
+kill_threads:
+	for_each_online_cpu(i)
+		if (threads[i])
+			kthread_stop(threads[i]);
+	kfree(threads);
+	return err;
 }
 
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine_run_notype(int (*fn)(void *), void *data, unsigned int cpu)
 {
-	struct task_struct *p;
 	int ret;
 
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
-	p = __stop_machine_run(fn, data, cpu);
-	if (!IS_ERR(p))
-		ret = kthread_stop(p);
-	else
-		ret = PTR_ERR(p);
+	ret = __stop_machine_run(fn, data, cpu);
 	put_online_cpus();
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine_run_notype);
```
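The heart of the stop_machine.c rewrite is the lockstep state machine: set_state() re-arms the ack counter, every kstop thread performs the action for the state it observes, and the last thread to ack_state() advances everyone to the next state. A user-space analogue using C11 atomics and pthreads (illustration only, not kernel code; all names invented) shows the protocol in isolation:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

enum state { NONE, DISABLE_IRQ, RUN, EXIT, COMPLETE };

static _Atomic enum state state = NONE;
static atomic_int thread_ack;

/* Re-arm the ack counter, then publish the new state. */
static void set_state(enum state newstate)
{
	atomic_store(&thread_ack, NTHREADS);
	atomic_store(&state, newstate);
}

/* The last thread to ack the current state advances everyone. */
static void ack_state(void)
{
	if (atomic_fetch_sub(&thread_ack, 1) == 1)
		set_state(atomic_load(&state) + 1);
}

/* Mirrors stop_cpu(): act on each newly observed state, then ack it. */
static void *stop_cpu(void *unused)
{
	enum state curstate = NONE;

	do {
		if (atomic_load(&state) != curstate) {
			curstate = atomic_load(&state);
			/* Kernel version disables IRQs for DISABLE_IRQ
			 * and calls fn for RUN at this point. */
			ack_state();
		}
	} while (curstate < EXIT);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	int i;

	set_state(DISABLE_IRQ);		/* as __stop_machine_run() does */
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, stop_cpu, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);

	printf("final state %d (COMPLETE == %d)\n", atomic_load(&state), COMPLETE);
	return 0;
}
```

Compile with `cc -pthread`. No thread can skip a state, because the state only advances after every thread has acknowledged the current one.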
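For callers, the visible change is the calling convention: __stop_machine_run() now returns fn's integer result directly, instead of a task_struct that had to be reaped with kthread_stop() (compare the kernel/cpu.c hunk above). A minimal sketch of the renamed export in use, assuming a hypothetical `disable_foo` callback:

```c
#include <linux/stop_machine.h>

/* Runs while every other online CPU spins with interrupts disabled. */
static int disable_foo(void *data)
{
	*(int *)data = 0;
	return 0;		/* becomes the return value of the whole run */
}

static int example_disable(int *foo_state)
{
	/* cpu == NR_CPUS means "any online CPU"; a real cpu number pins fn
	 * there; ALL_CPUS runs fn on every CPU, OR-ing the results. */
	return stop_machine_run_notype(disable_foo, foo_state, NR_CPUS);
}
```

Since stop_cpu() accumulates results with `fnret |=`, an ALL_CPUS callback should return 0 or a negative errno so that a failure on any one CPU shows up in the combined return value.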