summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorPhilipp Hachtmann <phacht@linux.vnet.ibm.com>2014-04-07 18:25:23 +0200
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2014-04-09 10:19:22 +0200
commitc3d6fe011f4792ae9b51a4f0a83ce11216f7a390 (patch)
treef7a92da020d1ef20e0cd73929713dbf6541c616f /arch
parent1135482aec4e83bf0ece023086cb61f35c6e742c (diff)
s390/spinlock: Implement ticket locks (8 Bit tickets)
This patch adds a ticket lock spinlock implementation to s390. It is controlled by CONFIG_S390_TICKET_SPINLOCK. The size of arch_spinlock_t is kept (32 bit). Therefore the lock tickets can have only 8 bits each. Therefore the lock implementation stops to be fair when the lock is taken and more than 253 CPUs are waiting for it. But this can be considered a rare case. [ Martin Schwidefsky: patch breakdown and code beautification ] Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/s390/Kconfig10
-rw-r--r--arch/s390/include/asm/spinlock.h37
-rw-r--r--arch/s390/include/asm/spinlock_types.h17
-rw-r--r--arch/s390/lib/spinlock.c88
4 files changed, 147 insertions, 5 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 00fdad6ea202..59c4db0bfc0b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -329,6 +329,16 @@ config SYSVIPC_COMPAT
config KEYS_COMPAT
def_bool y if COMPAT && KEYS
+config S390_TICKET_SPINLOCK
+ bool "Use ticket spinlocks"
+ depends on S390 && SMP
+ default n
+ help
+	  This enables support for ticket spinlocks. Ticket spinlocks
+	  are fairer in the sense that waiting CPUs acquire the lock
+	  in the order in which they tried to obtain it.
+	  The tradeoff is more complex code that could impact performance.
+
config SMP
def_bool y
prompt "Symmetric multi-processing support"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 5a0b2882ad48..84faa13aa7ae 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -1,7 +1,8 @@
/*
* S390 version
- * Copyright IBM Corp. 1999
+ * Copyright IBM Corp. 1999, 2014
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Philipp Hachtmann (phacht@linux.vnet.ibm.com)
*
* Derived from "include/asm-i386/spinlock.h"
*/
@@ -32,14 +33,35 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
- * We make no fairness assumptions. They have a cost.
- *
* (the type definitions are in asm/spinlock_types.h)
*/
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
+
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+void arch_spin_unlock_slow(arch_spinlock_t *lp);
+
+static inline u32 arch_spin_lockval(u32 cpu)
+{
+ arch_spinlock_t new;
+
+ new.tickets.owner = ~cpu;
+ new.tickets.head = 0;
+ new.tickets.tail = 0;
+ return new.lock;
+}
+
+static inline void arch_spin_lock_wait_flags(arch_spinlock_t *lp,
+ unsigned long flags)
+{
+ arch_spin_lock_wait(lp);
+}
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
static inline u32 arch_spin_lockval(int cpu)
@@ -47,6 +69,12 @@ static inline u32 arch_spin_lockval(int cpu)
return ~cpu;
}
+static inline void arch_spin_unlock_slow(arch_spinlock_t *lp)
+{
+}
+
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.lock == 0;
@@ -89,7 +117,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
- arch_spin_tryrelease_once(lp);
+ if (unlikely(!arch_spin_tryrelease_once(lp)))
+ arch_spin_unlock_slow(lp);
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..472e12254a51 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -5,10 +5,27 @@
# error "please don't include this file directly"
#endif
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+typedef struct arch_spinlock {
+ union {
+ unsigned int lock;
+ struct __raw_tickets {
+ u16 owner;
+ u8 tail;
+ u8 head;
+ } tickets;
+ };
+} arch_spinlock_t;
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
typedef struct {
unsigned int lock;
} __attribute__ ((aligned (4))) arch_spinlock_t;
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
typedef struct {
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 3ca9de4d9cb9..0946b99fe6c3 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -1,8 +1,9 @@
/*
* Out of line spinlock code.
*
- * Copyright IBM Corp. 2004, 2006
+ * Copyright IBM Corp. 2004, 2014
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Philipp Hachtmann (phacht@linux.vnet.ibm.com)
*/
#include <linux/types.h>
@@ -24,6 +25,89 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+ arch_spinlock_t cur, new;
+ int cpu, owner, count;
+ u8 ticket = 0;
+
+ cpu = smp_processor_id();
+ count = spin_retry;
+ while (1) {
+ new.lock = cur.lock = ACCESS_ONCE(lp->lock);
+ if (new.lock == 0) {
+ /* The lock is free with no waiter, try to get it. */
+ new.tickets.owner = (u16) ~cpu;
+ } else if (!ticket) {
+ /* Try to get a ticket. */
+ new.tickets.tail = (u8)(new.tickets.tail + 1) ? : 1;
+ if (new.tickets.tail == new.tickets.head)
+ /* Overflow, can't get a ticket. */
+ new.tickets.tail = cur.tickets.tail;
+ } else if (new.tickets.head == ticket)
+ new.tickets.owner = (u16) ~cpu;
+ /* Do the atomic update. */
+ if (cur.lock != new.lock &&
+ _raw_compare_and_swap(&lp->lock, cur.lock, new.lock)) {
+ /* Update successful. */
+ if (new.tickets.owner == (u16) ~cpu)
+ return; /* Got the lock. */
+ ticket = new.tickets.tail; /* Got a ticket. */
+ count = 0;
+ }
+ /* Lock could not be acquired yet. */
+ if (count--)
+ continue;
+ count = spin_retry;
+ owner = cur.tickets.owner;
+ if (ticket) {
+ if (owner && smp_vcpu_scheduled(~owner)) {
+ if (MACHINE_IS_LPAR)
+ continue;
+ } else
+ count = 0;
+ }
+ /* Yield the cpu. */
+ if (owner)
+ smp_yield_cpu(~owner);
+ else
+ smp_yield();
+ }
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
+
+void arch_spin_unlock_slow(arch_spinlock_t *lp)
+{
+ arch_spinlock_t cur, new;
+
+ do {
+ cur.lock = ACCESS_ONCE(lp->lock);
+ new.lock = 0;
+ if (cur.tickets.head != cur.tickets.tail) {
+ new.tickets.tail = cur.tickets.tail;
+ new.tickets.head = (u8)(cur.tickets.head + 1) ? : 1;
+ new.tickets.owner = 0;
+ }
+ } while (!_raw_compare_and_swap(&lp->lock, cur.lock, new.lock));
+}
+EXPORT_SYMBOL(arch_spin_unlock_slow);
+
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+ unsigned int cpu = lp->tickets.owner;
+
+ if (cpu != 0) {
+ if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+ !smp_vcpu_scheduled(~cpu))
+ smp_yield_cpu(~cpu);
+ }
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
@@ -94,6 +178,8 @@ void arch_spin_relax(arch_spinlock_t *lp)
}
EXPORT_SYMBOL(arch_spin_relax);
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
int count;