Diffstat (limited to 'linux')
-rw-r--r-- | linux/semaphore.c | 191
-rw-r--r-- | linux/six.c       |  55
2 files changed, 237 insertions, 9 deletions
diff --git a/linux/semaphore.c b/linux/semaphore.c
new file mode 100644
index 00000000..b7d4b517
--- /dev/null
+++ b/linux/semaphore.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * This file implements counting semaphores.
+ * A counting semaphore may be acquired 'n' times before sleeping.
+ * See mutex.c for single-acquisition sleeping locks which enforce
+ * rules which allow code to be debugged more easily.
+ */
+
+/*
+ * Some notes on the implementation:
+ *
+ * The spinlock controls access to the other members of the semaphore.
+ * down_trylock() and up() can be called from interrupt context, so we
+ * have to disable interrupts when taking the lock.  It turns out various
+ * parts of the kernel expect to be able to use down() on a semaphore in
+ * interrupt context when they know it will succeed, so we have to use
+ * irqsave variants for down(), down_interruptible() and down_killable()
+ * too.
+ *
+ * The ->count variable represents how many more tasks can acquire this
+ * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+static noinline void __down(struct semaphore *sem);
+static noinline int __down_timeout(struct semaphore *sem, long timeout);
+static noinline void __up(struct semaphore *sem);
+
+/**
+ * down - acquire the semaphore
+ * @sem: the semaphore to be acquired
+ *
+ * Acquires the semaphore.  If no more tasks are allowed to acquire the
+ * semaphore, calling this function will put the task to sleep until the
+ * semaphore is released.
+ *
+ * Use of this function is deprecated, please use down_interruptible() or
+ * down_killable() instead.
+ */
+void down(struct semaphore *sem)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&sem->lock, flags);
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
+		__down(sem);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(down);
+
+/**
+ * down_trylock - try to acquire the semaphore, without waiting
+ * @sem: the semaphore to be acquired
+ *
+ * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
+ * been acquired successfully or 1 if it cannot be acquired.
+ *
+ * NOTE: This return value is inverted from both spin_trylock and
+ * mutex_trylock!  Be careful about this when converting code.
+ *
+ * Unlike mutex_trylock, this function can be used from interrupt context,
+ * and the semaphore can be released by any task or interrupt.
+ */
+int down_trylock(struct semaphore *sem)
+{
+	unsigned long flags;
+	int count;
+
+	raw_spin_lock_irqsave(&sem->lock, flags);
+	count = sem->count - 1;
+	if (likely(count >= 0))
+		sem->count = count;
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+	return (count < 0);
+}
+EXPORT_SYMBOL(down_trylock);
+
+/**
+ * down_timeout - acquire the semaphore within a specified time
+ * @sem: the semaphore to be acquired
+ * @timeout: how long to wait before failing
+ *
+ * Attempts to acquire the semaphore.  If no more tasks are allowed to
+ * acquire the semaphore, calling this function will put the task to sleep.
+ * If the semaphore is not released within the specified number of jiffies,
+ * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
+ */
+int down_timeout(struct semaphore *sem, long timeout)
+{
+	unsigned long flags;
+	int result = 0;
+
+	raw_spin_lock_irqsave(&sem->lock, flags);
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
+		result = __down_timeout(sem, timeout);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(down_timeout);
+
+/**
+ * up - release the semaphore
+ * @sem: the semaphore to release
+ *
+ * Release the semaphore.  Unlike mutexes, up() may be called from any
+ * context and even by tasks which have never called down().
+ */
+void up(struct semaphore *sem)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&sem->lock, flags);
+	if (likely(list_empty(&sem->wait_list)))
+		sem->count++;
+	else
+		__up(sem);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(up);
+
+/* Functions for the contended case */
+
+struct semaphore_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	bool up;
+};
+
+/*
+ * Because this function is inlined, the 'state' parameter will be
+ * constant, and thus optimised away by the compiler.  Likewise the
+ * 'timeout' parameter for the cases without timeouts.
+ */
+static inline int __sched __down_common(struct semaphore *sem, long state,
+					long timeout)
+{
+	struct semaphore_waiter waiter;
+
+	list_add_tail(&waiter.list, &sem->wait_list);
+	waiter.task = current;
+	waiter.up = false;
+
+	for (;;) {
+		if (unlikely(timeout <= 0))
+			goto timed_out;
+		__set_current_state(state);
+		raw_spin_unlock_irq(&sem->lock);
+		timeout = schedule_timeout(timeout);
+		raw_spin_lock_irq(&sem->lock);
+		if (waiter.up)
+			return 0;
+	}
+
+ timed_out:
+	list_del(&waiter.list);
+	return -ETIME;
+}
+
+static noinline void __sched __down(struct semaphore *sem)
+{
+	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+}
+
+static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
+{
+	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
+}
+
+static noinline void __sched __up(struct semaphore *sem)
+{
+	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
+					struct semaphore_waiter, list);
+	list_del(&waiter->list);
+	waiter->up = true;
+	wake_up_process(waiter->task);
+}
diff --git a/linux/six.c b/linux/six.c
index 3d863a9b..49d46ed2 100644
--- a/linux/six.c
+++ b/linux/six.c
@@ -267,15 +267,21 @@ static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type
 #endif
 
 noinline
-static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type)
+static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
+				    six_lock_should_sleep_fn should_sleep_fn, void *p)
 {
 	const struct six_lock_vals l[] = LOCK_VALS;
 	union six_lock_state old, new;
 	struct six_lock_waiter wait;
+	int ret = 0;
 	u64 v;
 
+	ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
+	if (ret)
+		return ret;
+
 	if (six_optimistic_spin(lock, type))
-		return;
+		return 0;
 
 	lock_contended(&lock->dep_map, _RET_IP_);
 
@@ -292,6 +298,10 @@ static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type t
 			raw_spin_unlock(&lock->wait_lock);
 		}
 
+		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
+		if (ret)
+			break;
+
 		v = READ_ONCE(lock->state.v);
 		do {
 			new.v = old.v = v;
@@ -311,7 +321,8 @@ static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type t
 		schedule();
 	}
 
-	six_set_owner(lock, type, old);
+	if (!ret)
+		six_set_owner(lock, type, old);
 
 	__set_current_state(TASK_RUNNING);
 
@@ -320,18 +331,28 @@ static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type t
 		list_del_init(&wait.list);
 		raw_spin_unlock(&lock->wait_lock);
 	}
+
+	return ret;
 }
 
 __always_inline
-static void __six_lock_type(struct six_lock *lock, enum six_lock_type type)
+static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
+			   six_lock_should_sleep_fn should_sleep_fn, void *p)
 {
+	int ret;
+
 	if (type != SIX_LOCK_write)
 		six_acquire(&lock->dep_map, 0);
 
-	if (!do_six_trylock_type(lock, type))
-		__six_lock_type_slowpath(lock, type);
+	ret = do_six_trylock_type(lock, type) ? 0
+		: __six_lock_type_slowpath(lock, type, should_sleep_fn, p);
 
-	lock_acquired(&lock->dep_map, _RET_IP_);
+	if (ret && type != SIX_LOCK_write)
+		six_release(&lock->dep_map);
+	if (!ret)
+		lock_acquired(&lock->dep_map, _RET_IP_);
+
+	return ret;
 }
 
 static inline void six_lock_wakeup(struct six_lock *lock,
@@ -417,9 +438,10 @@ bool six_relock_##type(struct six_lock *lock, u32 seq)			\
 }									\
 EXPORT_SYMBOL_GPL(six_relock_##type);					\
 									\
-void six_lock_##type(struct six_lock *lock)				\
+int six_lock_##type(struct six_lock *lock,				\
+		    six_lock_should_sleep_fn should_sleep_fn, void *p)	\
 {									\
-	__six_lock_type(lock, SIX_LOCK_##type);				\
+	return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
 }									\
 EXPORT_SYMBOL_GPL(six_lock_##type);					\
 									\
@@ -514,3 +536,18 @@ void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
 	}
 }
 EXPORT_SYMBOL_GPL(six_lock_increment);
+
+void six_lock_wakeup_all(struct six_lock *lock)
+{
+	struct six_lock_waiter *w;
+
+	raw_spin_lock(&lock->wait_lock);
+
+	list_for_each_entry(w, &lock->wait_list[0], list)
+		wake_up_process(w->task);
+	list_for_each_entry(w, &lock->wait_list[1], list)
+		wake_up_process(w->task);
+
+	raw_spin_unlock(&lock->wait_lock);
+}
+EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
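
A minimal usage sketch for the compat semaphore added above. The sem_demo names are hypothetical, and it assumes the accompanying include/linux/semaphore.h mirrors the in-kernel declarations (sema_init(), HZ); down_timeout() returns -ETIME on timeout and 0 on success, as documented in the file:

	#include <linux/semaphore.h>

	static struct semaphore sem_demo;

	static void sem_demo_init(void)
	{
		sema_init(&sem_demo, 2);	/* up to two holders at a time */
	}

	static int sem_demo_use(void)
	{
		/* Bounded wait: give up after roughly one second's worth of jiffies. */
		int ret = down_timeout(&sem_demo, HZ);

		if (ret)
			return ret;	/* -ETIME: no slot became free */

		/* ...do work while holding one of the two slots... */
		up(&sem_demo);
		return 0;
	}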
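And a sketch of how a caller might use the new six_lock_*() signature, where should_sleep_fn lets a would-be sleeper bail out instead of blocking. The maybe_fail/shutting_down names and the -EINTR error code are illustrative only, not part of this commit:

	static int maybe_fail(struct six_lock *lock, void *p)
	{
		bool *shutting_down = p;

		/* Nonzero aborts the lock attempt; the value is returned to the caller. */
		return *shutting_down ? -EINTR : 0;
	}

	static int lock_for_read(struct six_lock *lock, bool *shutting_down)
	{
		/* Passing NULL for should_sleep_fn keeps the old always-block behaviour. */
		return six_lock_read(lock, maybe_fail, shutting_down);
	}

A task that flips the flag can then call the new six_lock_wakeup_all() so that waiters already asleep in the slowpath re-run their should_sleep_fn and return the error instead of continuing to wait.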