author		Johannes Berg <johannes.berg@intel.com>	2018-08-22 11:49:04 +0200
committer	Tejun Heo <tj@kernel.org>	2018-08-22 08:31:38 -0700
commit		87915adc3f0acdf03c776df42e308e5a155c19af (patch)
tree		cce568b654b2bb6e7d893655c9693b370dcd98bd /kernel
parent		d6e89786bed977f37f55ffca11e563f6d2b1e3b5 (diff)
workqueue: re-add lockdep dependencies for flushing
In flush_work(), we need to create a lockdep dependency so that
the following scenario is appropriately tagged as a problem:

	work_function()
	{
		mutex_lock(&mutex);
		...
	}

	other_function()
	{
		mutex_lock(&mutex);
		flush_work(&work); // or cancel_work_sync(&work);
	}

This is a problem since the work might be running and be blocked
on trying to acquire the mutex.

Similarly, in flush_workqueue().

These were removed after cross-release partially caught these
problems, but now cross-release was reverted anyway. IMHO the
removal was erroneous anyway though, since lockdep should be
able to catch potential problems, not just actual ones, and
cross-release would only have caught the problem when actually
invoking wait_for_completion().

Fixes: fd1a5b04dfb8 ("workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
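As a concrete, self-contained illustration of the scenario above (the
demo_* identifiers are hypothetical, not part of this patch), the
pattern the re-added annotations let lockdep flag looks like this in
a driver:

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	static DEFINE_MUTEX(demo_mutex);

	static void demo_work_fn(struct work_struct *work)
	{
		mutex_lock(&demo_mutex);
		/* ... touch state protected by demo_mutex ... */
		mutex_unlock(&demo_mutex);
	}

	static DECLARE_WORK(demo_work, demo_work_fn);

	static void demo_teardown(void)
	{
		mutex_lock(&demo_mutex);
		/*
		 * Potential deadlock: demo_work_fn() may already be
		 * running and blocked on demo_mutex, while flush_work()
		 * below waits for it to finish.  The annotations
		 * re-added by this patch record "flush_work() waits on
		 * demo_work's class while demo_mutex is held", closing
		 * the cycle in lockdep's dependency graph.
		 */
		flush_work(&demo_work); /* or cancel_work_sync(&demo_work) */
		mutex_unlock(&demo_mutex);
	}

With CONFIG_PROVE_LOCKING enabled, lockdep can report a possible
circular locking dependency for this pattern even when the deadlock
never actually triggers at runtime.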
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	8
1 files changed, 8 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index aa520e715bbc..661184fcd503 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq)
 	if (WARN_ON(!wq_online))
 		return;
 
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
+
 	mutex_lock(&wq->mutex);
 
 	/*
@@ -2905,6 +2908,11 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!wq_online))
 		return false;
 
+	if (!from_cancel) {
+		lock_map_acquire(&work->lockdep_map);
+		lock_map_release(&work->lockdep_map);
+	}
+
 	if (start_flush_work(work, &barr, from_cancel)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
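A note on the mechanism in the hunks above: an acquire immediately
followed by a release never blocks at runtime; it only records in
lockdep's dependency graph that the calling context may wait on that
lock class. Below is a minimal sketch of the same pattern, assuming
CONFIG_LOCKDEP=y and using hypothetical demo_* names (without lockdep
these annotations compile away to no-ops):

	#include <linux/lockdep.h>

	/* A standalone map, analogous to wq->lockdep_map above. */
	static struct lock_class_key demo_flush_key;
	static struct lockdep_map demo_flush_map =
		STATIC_LOCKDEP_MAP_INIT("demo_flush", &demo_flush_key);

	static void demo_flush(void)
	{
		/*
		 * Empty acquire/release pair: tells lockdep that
		 * callers of demo_flush() may wait on whatever runs
		 * under demo_flush_map, without taking any real lock.
		 */
		lock_map_acquire(&demo_flush_map);
		lock_map_release(&demo_flush_map);

		/* ... actually wait for the flushed object here ... */
	}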