author	Chris Wilson <chris@chris-wilson.co.uk>	2017-06-28 13:35:48 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2017-06-28 15:23:12 +0100
commit	7b92c1bd0540b64f54d98331d67e57266f9343c4 (patch)
tree	c2ec13c790024d982bbffe17fe9c44c01c3dc872 /drivers/gpu/drm/i915/i915_gem.c
parent	13f8458f9a4522bcb7d1856dd8b329ff5d90f887 (diff)
drm/i915: Avoid keeping waitboost active for signaling threads
Once a client has requested a waitboost, we keep that waitboost active
until all clients are no longer waiting. This is because we don't
distinguish which waiter deserves the boost. However, with the advent
of fence signaling, the signaler threads appear as waiters to the RPS
interrupt handler. So instead of using a single boolean to track when
to keep the waitboost active, use a counter of all outstanding
waitboosted requests.

At this point, I have removed all vestiges of the rate limiting on
clients. Whilst this means that compositors should remain more fluid,
it also means that boosts are more prevalent. See commit b29c19b64528
("drm/i915: Boost RPS frequency for CPU stalls") for a longer
discussion on the pros and cons of both approaches.

A drawback of this implementation is that it requires constant request
submission to keep the waitboost trimmed (as it is now cancelled when
the request is completed). This will be fine for a busy system, but
near idle the boosts may be kept for longer than desired (effectively
tens of vblanks worst case) and there is a reliance on rc6 instead.

v2: Remove defunct rps.client_lock

Reported-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170628123548.9236-1-chris@chris-wilson.co.uk
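As an illustration of the counter-based approach described above, the following is a minimal, self-contained C model: a per-request flag plus a global atomic counter of outstanding waitboosted requests replaces the single boolean, so the boost is held exactly while at least one boosted request is still in flight. All names here (struct boost_request, rps_boost_request, rps_retire_request, num_waiters) are hypothetical stand-ins, not the i915 symbols touched by this patch, and the model omits the locking the driver must do against the request and RPS machinery.

/*
 * Minimal model of counter-based waitboost tracking (hypothetical names,
 * not the actual i915 implementation).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct boost_request {
	bool waitboost;		/* has this request already been boosted? */
};

/* Number of in-flight requests currently holding a waitboost. */
static atomic_int num_waiters;

/* A waiter (client or signaler thread) requests a boost for @rq. */
static void rps_boost_request(struct boost_request *rq)
{
	if (!rq->waitboost) {
		rq->waitboost = true;
		atomic_fetch_add(&num_waiters, 1);
	}
}

/* The request completed; drop its boost so the counter stays trimmed. */
static void rps_retire_request(struct boost_request *rq)
{
	if (rq->waitboost) {
		rq->waitboost = false;
		atomic_fetch_sub(&num_waiters, 1);
	}
}

/* RPS keeps the boost only while at least one boosted request is in flight. */
static bool rps_boost_active(void)
{
	return atomic_load(&num_waiters) > 0;
}

int main(void)
{
	struct boost_request rq = { .waitboost = false };

	rps_boost_request(&rq);
	printf("boost active: %d\n", rps_boost_active());	/* prints 1 */
	rps_retire_request(&rq);
	printf("boost active: %d\n", rps_boost_active());	/* prints 0 */
	return 0;
}

Because the counter is only dropped when a request completes rather than when a client stops waiting, a steady stream of requests keeps it trimmed, which is exactly the near-idle drawback called out in the commit message.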
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	25
1 file changed, 1 insertion(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f38c84e485ab..1b2dfa8bdeef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -388,7 +388,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
          */
         if (rps) {
                 if (INTEL_GEN(rq->i915) >= 6)
-                        gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+                        gen6_rps_boost(rq, rps);
                 else
                         rps = NULL;
         }
@@ -399,22 +399,6 @@ out:
         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
                 i915_gem_request_retire_upto(rq);
 
-        if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
-                /* The GPU is now idle and this client has stalled.
-                 * Since no other client has submitted a request in the
-                 * meantime, assume that this client is the only one
-                 * supplying work to the GPU but is unable to keep that
-                 * work supplied because it is waiting. Since the GPU is
-                 * then never kept fully busy, RPS autoclocking will
-                 * keep the clocks relatively low, causing further delays.
-                 * Compensate by giving the synchronous client credit for
-                 * a waitboost next time.
-                 */
-                spin_lock(&rq->i915->rps.client_lock);
-                list_del_init(&rps->link);
-                spin_unlock(&rq->i915->rps.client_lock);
-        }
-
         return timeout;
 }
@@ -5053,12 +5037,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
                 request->file_priv = NULL;
         spin_unlock(&file_priv->mm.lock);
-
-        if (!list_empty(&file_priv->rps.link)) {
-                spin_lock(&to_i915(dev)->rps.client_lock);
-                list_del(&file_priv->rps.link);
-                spin_unlock(&to_i915(dev)->rps.client_lock);
-        }
 }
 
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
@@ -5075,7 +5053,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
         file->driver_priv = file_priv;
         file_priv->dev_priv = i915;
         file_priv->file = file;
-        INIT_LIST_HEAD(&file_priv->rps.link);
 
         spin_lock_init(&file_priv->mm.lock);
         INIT_LIST_HEAD(&file_priv->mm.request_list);