From 502b83be8213615aa9cec0b24b71035a36b4f017 Mon Sep 17 00:00:00 2001
From: Manfred Spraul
Date: Fri, 14 Aug 2015 15:35:10 -0700
Subject: ipc/sem.c: update/correct memory barriers

commit 3ed1f8a99d70ea1cd1508910eb107d0edcae5009 upstream.

sem_lock() did not properly pair memory barriers: !spin_is_locked() and
spin_unlock_wait() are both only control barriers.  The code needs an
acquire barrier, otherwise the CPU might perform read operations before
the lock test.

As no primitive exists inside <include/linux/spinlock.h> and since it
seems no one wants another primitive, the code creates a local primitive
within ipc/sem.c.

With regards to -stable:

The change of sem_wait_array() is a bugfix, the change to sem_lock() is a
nop (just a preprocessor redefinition to improve readability).  The bugfix
is necessary for all kernels that use sem_wait_array() (i.e. starting from
3.10).

Signed-off-by: Manfred Spraul
Reported-by: Oleg Nesterov
Acked-by: Peter Zijlstra (Intel)
Cc: "Paul E. McKenney"
Cc: Kirill Tkhai
Cc: Ingo Molnar
Cc: Josh Poimboeuf
Cc: Davidlohr Bueso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 ipc/sem.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

(limited to 'ipc')

diff --git a/ipc/sem.c b/ipc/sem.c
index 45e1de73f4f8..e53c96f7db42 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
 	ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
-			/* spin_is_locked() is not a memory barrier */
-			smp_mb();
+			/*
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
+			 *	complex_count++;
+			 *	spin_unlock(sem_perm.lock);
+			 */
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/* Now repeat the test of complex_count:
 			 * It can't change anymore until we drop sem->lock.
--
cgit v1.2.3
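
A note on the pairing the patch relies on (not part of the patch itself): the
unlocking CPU's spin_unlock() is a release, the lock test in sem_lock() and
sem_wait_array() is only a control dependency, and the new
ipc_smp_acquire__after_spin_is_unlocked() (smp_rmb()) upgrades that test so
the subsequent re-read of complex_count cannot be satisfied by a value
fetched before the lock was observed free.  The sketch below models that
argument with userspace C11 atomics under stated assumptions: lock_flag,
complex_count, unlocker() and observer() are hypothetical stand-ins, and an
acquire fence stands in for smp_rmb() because C11 has no pure read fence.
It illustrates the ordering reasoning; it is not the kernel code.

    /*
     * Userspace model (illustration only, not kernel code):
     *   - the release store models spin_unlock(&sma->sem_perm.lock)
     *   - the relaxed load loop models !spin_is_locked()
     *   - the acquire fence models ipc_smp_acquire__after_spin_is_unlocked()
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int lock_flag = 1;      /* 1 == "sem_perm.lock held" */
    static atomic_int complex_count = 0;  /* protected by that lock */

    static void *unlocker(void *arg)
    {
            (void)arg;
            /* Critical section: update complex_count while "holding" the lock. */
            atomic_store_explicit(&complex_count, 1, memory_order_relaxed);
            /* spin_unlock() is a release: it publishes the update above. */
            atomic_store_explicit(&lock_flag, 0, memory_order_release);
            return NULL;
    }

    static void *observer(void *arg)
    {
            (void)arg;
            /* !spin_is_locked(): only a control dependency, not a barrier. */
            while (atomic_load_explicit(&lock_flag, memory_order_relaxed))
                    ;
            /*
             * Without this acquire fence, the re-read of complex_count below
             * could be satisfied by a value fetched before the lock test,
             * missing the update made under the lock.
             */
            atomic_thread_fence(memory_order_acquire);
            printf("complex_count seen as %d\n",
                   atomic_load_explicit(&complex_count, memory_order_relaxed));
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, observer, NULL);
            pthread_create(&b, NULL, unlocker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            return 0;
    }

Built with e.g. "cc -std=c11 -pthread model.c", the observer always prints 1
with the fence in place; without it, the relaxed loads give no ordering
guarantee and a weakly ordered machine may report 0, which is the class of
race the sem_wait_array() fix closes.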