author    Chris Wilson <chris@chris-wilson.co.uk>  2017-07-18 14:41:24 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2017-07-19 13:19:24 +0100
commit    3b19f16a556446c144a1f921444931b0cf9447ab (patch)
tree      cffdfd025ad7022405dad16291684d84ac9185da /drivers/gpu/drm/i915/i915_drv.h
parent    023f807989f4d6a076258f4efe6d1da0d7aaeb29 (diff)
drm/i915: Drain the device workqueue on unload
Workers on the i915->wq may rearm themselves, so for completeness we
need to replace our flush_workqueue() with a call to drain_workqueue()
before unloading the device.

v2: Reinforce the drain_workqueue with a preceding rcu_barrier(), as a
few of the tasks that need to be drained may first be armed by RCU.

References: https://bugs.freedesktop.org/show_bug.cgi?id=101627
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170718134124.14832-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
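[Editor's note: for illustration only, a toy module — hypothetical, not i915
code — showing the pattern the patch guards against: a worker that rearms
itself, which a single flush_workqueue() cannot reliably wait out, together
with the rcu_barrier() + drain_workqueue() teardown the patch adopts.]

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

static struct workqueue_struct *wq;
static atomic_t todo = ATOMIC_INIT(3);

static void rearm_fn(struct work_struct *work);
static DECLARE_WORK(rearm_work, rearm_fn);

static void rearm_fn(struct work_struct *work)
{
	/*
	 * Rearm while work remains outstanding. flush_workqueue() only
	 * waits for items queued before it was called, so it can return
	 * while a re-queued instance is still pending; drain_workqueue()
	 * keeps flushing until the queue stays empty.
	 */
	if (atomic_dec_return(&todo) > 0)
		queue_work(wq, &rearm_work);
}

static int __init demo_init(void)
{
	wq = alloc_workqueue("drain-demo", 0, 0);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &rearm_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * Mirror the patch: run pending RCU callbacks first (they may
	 * queue work), then drain rather than flush.
	 */
	rcu_barrier();
	drain_workqueue(wq);
	destroy_workqueue(wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");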
Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 559fdc7bb393..017361833c58 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3300,6 +3300,26 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
} while (flush_work(&i915->mm.free_work));
}
+static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ /*
+ * Similar to the free-object drain above (see
+ * i915_gem_drain_freed_objects()), in general we have workers that
+ * are armed by RCU and then rearm themselves in their callbacks.
+ * To be paranoid, we need to drain the workqueue a second time
+ * after waiting for the RCU grace period, so that we catch work
+ * queued via RCU from the first pass. As neither drain_workqueue()
+ * nor flush_workqueue() report a result, we assume that no more
+ * than 2 passes are required to catch all recursive RCU delayed
+ * work.
+ */
+ int pass = 2;
+ do {
+ rcu_barrier();
+ drain_workqueue(i915->wq);
+ } while (--pass);
+}
+
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
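[Editor's note: the new helper is intended to be called on device unload in
place of a bare flush_workqueue(i915->wq). A minimal sketch of such a call
site — the function name is hypothetical and the actual caller lives outside
this diffstat:]

/* Hypothetical caller for illustration; not part of this hunk. */
static void example_driver_unload(struct drm_i915_private *i915)
{
	/* ... quiesce the device and cancel delayed work ... */

	/*
	 * rcu_barrier() + drain_workqueue() (via the new helper) also
	 * catches work armed from RCU callbacks and workers that rearm
	 * themselves, which a single flush_workqueue() may miss.
	 */
	i915_gem_drain_workqueue(i915);

	/* ... destroy_workqueue(i915->wq), unregister the device ... */
}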