author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-12-21 03:14:03 +0100
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-12-21 03:14:03 +0100
commit		e219aafe50fd546b8686582ddbafd24c3c2eda04 (patch)
tree		a665e08ca8d42f20ac8e5dcc8565d666cffb26bd /kernel/locking/qrwlock.c
parent		ffe12855a5f7f195589130197558e6a5c276caa4 (diff)
parent		24c96dc79c5e76d3cff7a33f955a4d3244644766 (diff)
Merge back earlier 'pm-domains' material for v4.5.
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r--	kernel/locking/qrwlock.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
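
For context before the hunks: the field being renamed is the arch spinlock embedded in struct qrwlock itself, so the old code read as the doubly-confusing lock->lock. A minimal sketch of the type after the rename, patterned on include/asm-generic/qrwlock_types.h (treat the exact comments and alignment as illustrative, not a verbatim copy):

/*
 * Sketch of the queued rwlock layout after this rename. cnts tracks
 * readers and the writer state; wait_lock (formerly ->lock) serializes
 * slowpath waiters so that only the queue head spins on cnts.
 */
typedef struct qrwlock {
	atomic_t	cnts;		/* reader count / writer state */
	arch_spinlock_t	wait_lock;	/* was ->lock; queues waiters */
} arch_rwlock_t;
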
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f17a3e3b3550..fec082338668 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Put the reader into the wait queue
 	 */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Signal the next one in queue to become queue head
 	 */
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	u32 cnts;
 
 	/* Put the writer into the wait queue */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		cpu_relax_lowlatency();
 	}
 unlock:
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_write_lock_slowpath);
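
Both slowpaths above follow the same pattern: take wait_lock so waiters queue up in order, let only the queue head spin on cnts, then release wait_lock to hand queue-head status to the next waiter. A rough userspace analogue of that pattern, with C11 atomics and a pthread mutex standing in for arch_spinlock_t (all names here are hypothetical and the counter encoding is simplified; this is a sketch of the idea, not kernel code):

#include <stdatomic.h>
#include <pthread.h>

#define TOY_QW_LOCKED 0x100u		/* writer bit above the reader count */

struct toy_qrwlock {
	atomic_uint cnts;		/* readers in low bits, writer bit above */
	pthread_mutex_t wait_lock;	/* queues slowpath waiters */
};

/* Writer slowpath sketch: queue on wait_lock, then spin until there are
 * no readers and the writer bit can be installed in one atomic step. */
static void toy_write_lock_slowpath(struct toy_qrwlock *l)
{
	pthread_mutex_lock(&l->wait_lock);	/* only the queue head spins */
	for (;;) {
		unsigned int expected = 0;
		if (atomic_compare_exchange_weak(&l->cnts, &expected,
						 TOY_QW_LOCKED))
			break;			/* no readers; writer bit now set */
	}
	pthread_mutex_unlock(&l->wait_lock);	/* pass queue-head status on */
}

/* Reader slowpath sketch: queue, register as a reader, then wait for
 * any active writer to drop the writer bit before proceeding. */
static void toy_read_lock_slowpath(struct toy_qrwlock *l)
{
	pthread_mutex_lock(&l->wait_lock);
	atomic_fetch_add(&l->cnts, 1);		/* bump the reader count */
	while (atomic_load(&l->cnts) & TOY_QW_LOCKED)
		;				/* spin until the writer is gone */
	pthread_mutex_unlock(&l->wait_lock);	/* let the next waiter in */
}

The matching unlock paths (decrementing the reader count, clearing the writer bit) are omitted; the point is only the queue-then-spin handoff that the renamed wait_lock provides.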