Diffstat (limited to 'arch/arm/mach-msm/include/mach/remote_spinlock.h')
-rw-r--r--	arch/arm/mach-msm/include/mach/remote_spinlock.h	165
1 files changed, 165 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-msm/include/mach/remote_spinlock.h b/arch/arm/mach-msm/include/mach/remote_spinlock.h
new file mode 100644
index 000000000000..015b6ffd89af
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/remote_spinlock.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Part of this code is based on the standard ARM spinlock
+ * implementation (asm/spinlock.h) found in the 2.6.29 kernel.
+ */
+
+#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+typedef struct {
+ volatile uint32_t lock;
+} raw_remote_spinlock_t;
+
+typedef raw_remote_spinlock_t *_remote_spinlock_t;
+
+#define remote_spin_lock_id_t uint32_t
+
+static inline void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]\n"
+" teqeq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (1)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ smp_mb();
+
+ __asm__ __volatile__(
+" str %1, [%0]\n"
+ :
+ : "r" (&lock->lock), "r" (0)
+ : "cc");
+}
+
+static inline void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: swp %0, %2, [%1]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (1)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline void __raw_remote_swp_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ smp_mb();
+
+ __asm__ __volatile__(
+" str %1, [%0]"
+ :
+ : "r" (&lock->lock), "r" (0)
+ : "cc");
+}
+
+
+int _remote_spin_lock_init(remote_spin_lock_id_t id, _remote_spinlock_t *lock);
+
+/* Only use SWP-based spinlocks for ARM11 apps processors where the LDREX/STREX
+ * instructions are unable to lock shared memory for exclusive access. */
+#if defined(CONFIG_ARCH_MSM_ARM11)
+#define _remote_spin_lock(lock) __raw_remote_swp_spin_lock(*lock)
+#define _remote_spin_unlock(lock) __raw_remote_swp_spin_unlock(*lock)
+#else
+#define _remote_spin_lock(lock) __raw_remote_ex_spin_lock(*lock)
+#define _remote_spin_unlock(lock) __raw_remote_ex_spin_unlock(*lock)
+#endif /* CONFIG_ARCH_MSM_ARM11 */
+
+
+/* Grabbing a local spin lock before going for a remote lock has several
+ * advantages:
+ * 1. Get calls to preempt enable/disable and IRQ save/restore for free.
+ * 2. On a UP kernel, there is no overhead.
+ * 3. Reduces how often the remote spin lock code executes. This is
+ *    especially useful when the remote CPUs' mutual exclusion instructions
+ *    don't work with the local CPUs' instructions. In such cases, one has to
+ *    use software-based mutex algorithms (e.g. Lamport's bakery algorithm),
+ *    which can get expensive when the number of contending CPUs is high.
+ * 4. In the case of a software-based mutex algorithm, the execution time
+ *    will be smaller since the number of contending CPUs is reduced by
+ *    having just one contender for all the local CPUs.
+ * 5. Get most of the spin lock debug features for free.
+ * 6. The code will continue to work "gracefully" even when the remote spin
+ * lock code is stubbed out for debug purposes or when there is no remote
+ * CPU in some board/machine types.
+ */
+typedef struct {
+ spinlock_t local;
+ _remote_spinlock_t remote;
+} remote_spinlock_t;
+
+#define remote_spin_lock_init(lock, id) \
+ do { \
+ spin_lock_init(&((lock)->local)); \
+ _remote_spin_lock_init(id, &((lock)->remote)); \
+ } while (0)
+#define remote_spin_lock(lock) \
+ do { \
+ spin_lock(&((lock)->local)); \
+ _remote_spin_lock(&((lock)->remote)); \
+ } while (0)
+#define remote_spin_unlock(lock) \
+ do { \
+ _remote_spin_unlock(&((lock)->remote)); \
+ spin_unlock(&((lock)->local)); \
+ } while (0)
+#define remote_spin_lock_irqsave(lock, flags) \
+ do { \
+ spin_lock_irqsave(&((lock)->local), flags); \
+ _remote_spin_lock(&((lock)->remote)); \
+ } while (0)
+#define remote_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ _remote_spin_unlock(&((lock)->remote)); \
+ spin_unlock_irqrestore(&((lock)->local), flags); \
+ } while (0)
+
+#endif /* __ASM__ARCH_QC_REMOTE_SPINLOCK_H */
+
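For illustration, here is a minimal caller-side sketch of how a driver might use this API. It is not part of the commit above: the lock id value (5) and every example_* identifier are hypothetical, since real ids are assigned by whatever platform code backs _remote_spin_lock_init(). Note the ordering the macros enforce: the local spinlock is taken before the shared-memory lock and released after it.

/*
 * Hypothetical usage sketch (not part of this commit). Assumes the
 * platform assigns lock id 5; all example_* names are made up.
 */
#include <linux/spinlock.h>
#include <mach/remote_spinlock.h>

static remote_spinlock_t example_lock;
static uint32_t *example_shared_word;	/* word in processor-shared memory */

static int example_init(void)
{
	/* Initializes the local spinlock_t and asks the platform code
	 * to map id 5 to a lock word in shared memory. */
	remote_spin_lock_init(&example_lock, 5);
	return 0;
}

static void example_write(uint32_t val)
{
	unsigned long flags;

	/* Local lock first (with IRQs saved), then the remote lock;
	 * unlock reverses the order. */
	remote_spin_lock_irqsave(&example_lock, flags);
	*example_shared_word = val;
	remote_spin_unlock_irqrestore(&example_lock, flags);
}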