author     Dave Airlie <airlied@redhat.com>   2019-11-15 12:16:43 +1000
committer  Dave Airlie <airlied@redhat.com>   2019-11-15 12:16:43 +1000
commit     2d0720f5a4fc2aa5ae92f21fc113d7626b5a3c9f (patch)
tree       62d241d86b32eede235b84d7e5a67e79215c800f /drivers/gpu/drm/i915/gt
parent     dfce90259d74d34cff4cb0c75ecfc0336c09520f (diff)
parent     789c4aea3f08026360d026c0ea69b33797ac88c2 (diff)
Merge tag 'drm-intel-next-fixes-2019-11-14' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
- PMU "Frequency" is reported as accumulated cycles
- Avoid OOPS in dumb_create IOCTL when no CRTCs
- Mitigation for userptr put_pages deadlock with trylock_page
- Fix to avoid freeing heartbeat request too early
- Fix LRC coherency issue
- Fix Bugzilla #112212: Avoid screen corruption on MST
- Error path fix to unlock context on failed context VM SETPARAM
- Always consider holding preemption a privileged op in perf/OA
- Preload LUTs if the hw isn't currently using them to avoid color flash on VLV/CHV
- Protect context while grabbing its name for the request
- Don't resize aliasing ppGTT size
- Smaller fixes picked by tooling

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191114085213.GA6440@jlahtine-desk.ger.corp.intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c          2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c   4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c              118
3 files changed, 66 insertions, 58 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f8113bc756c6..5ca3ec911e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1372,6 +1372,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
execlists_active_lock_bh(execlists);
+ rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
char hdr[80];
int len;
@@ -1409,6 +1410,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
if (tl)
intel_timeline_put(tl);
}
+ rcu_read_unlock();
execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
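
The two lines added above wrap the existing execlists port walk in an RCU read-side critical section, so that a request plucked from the ports cannot be freed while the register printer is still dereferencing it. A minimal sketch of the pattern, with hypothetical my_* types standing in for the i915 structures:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>

    /* Hypothetical stand-ins for the i915 types; illustration only. */
    struct my_request { unsigned long long seqno; };
    struct my_execlists { struct my_request **active; };

    static void dump_active_requests(struct my_execlists *el)
    {
            struct my_request *rq, **port;

            rcu_read_lock();        /* requests may be retired and freed concurrently */
            for (port = el->active; (rq = *port); port++)
                    pr_info("active request %llu\n", rq->seqno);
            rcu_read_unlock();      /* rq must not be touched past this point */
    }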
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 5051f304705b..06aa14c7aa8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -141,8 +141,8 @@ void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
{
- cancel_delayed_work(&engine->heartbeat.work);
- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ if (cancel_delayed_work(&engine->heartbeat.work))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
}
void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
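
The heartbeat fix above hinges on the return value of cancel_delayed_work(): it returns true only when it removed work that was still pending, in which case the worker will never run and the caller inherits the reference the work was holding. If it returns false, the worker is already executing (or has finished) and drops the reference itself, so the old unconditional put could release the request twice. A sketch of that ownership rule, using hypothetical names (struct heart, my_obj, my_put):

    #include <linux/workqueue.h>

    /* Hypothetical refcounted object; illustration only. */
    struct my_obj;
    void my_put(struct my_obj *obj);

    struct heart {
            struct delayed_work work;   /* pending work owns one my_obj reference */
            struct my_obj *beat;
    };

    static void park(struct heart *h)
    {
            /*
             * true:  the work was still queued and will never run, so the
             *        reference it held is now ours to release.
             * false: the worker is running (or already ran) and releases
             *        the reference itself; putting it here too would be a
             *        double free.
             */
            if (cancel_delayed_work(&h->work)) {
                    struct my_obj *beat = h->beat;

                    h->beat = NULL;
                    my_put(beat);
            }
    }

The i915 code uses its fetch_and_zero() helper for the clear-and-return step, which is spelled out as two statements here.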
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 51aef2a233cb..0ac3b26674ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -990,6 +990,59 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
+static void restore_default_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state)
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+
+ execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->hw_context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+ __func__, engine->name, rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (i915_request_completed(rq))
+ head = rq->tail;
+ else
+ head = active_request(ce->timeline, rq)->head;
+ ce->ring->head = intel_ring_wrap(ce->ring, head);
+ intel_ring_update_space(ce->ring);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ restore_default_state(ce, engine);
+ __execlists_update_reg_state(ce, engine);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+}
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -998,6 +1051,9 @@ __execlists_schedule_in(struct i915_request *rq)
intel_context_get(ce);
+ if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ reset_active(rq, engine);
+
if (ce->tag) {
/* Use a fixed tag for OA and friends */
ce->lrc_desc |= (u64)ce->tag << 32;
@@ -1047,72 +1103,22 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
tasklet_schedule(&ve->base.execlists.tasklet);
}
-static void restore_default_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
-{
- u32 *regs = ce->lrc_reg_state;
-
- if (engine->pinned_default_state)
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
-
- execlists_init_reg_state(regs, ce, engine, ce->ring, false);
-}
-
-static void reset_active(struct i915_request *rq,
- struct intel_engine_cs *engine)
-{
- struct intel_context * const ce = rq->hw_context;
- u32 head;
-
- /*
- * The executing context has been cancelled. We want to prevent
- * further execution along this context and propagate the error on
- * to anything depending on its results.
- *
- * In __i915_request_submit(), we apply the -EIO and remove the
- * requests' payloads for any banned requests. But first, we must
- * rewind the context back to the start of the incomplete request so
- * that we do not jump back into the middle of the batch.
- *
- * We preserve the breadcrumbs and semaphores of the incomplete
- * requests so that inter-timeline dependencies (i.e other timelines)
- * remain correctly ordered. And we defer to __i915_request_submit()
- * so that all asynchronous waits are correctly handled.
- */
- GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
- __func__, engine->name, rq->fence.context, rq->fence.seqno);
-
- /* On resubmission of the active request, payload will be scrubbed */
- if (i915_request_completed(rq))
- head = rq->tail;
- else
- head = active_request(ce->timeline, rq)->head;
- ce->ring->head = intel_ring_wrap(ce->ring, head);
- intel_ring_update_space(ce->ring);
-
- /* Scrub the context image to prevent replaying the previous batch */
- restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine);
-
- /* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
-}
-
static inline void
__execlists_schedule_out(struct i915_request *rq,
struct intel_engine_cs * const engine)
{
struct intel_context * const ce = rq->hw_context;
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put(engine->gt);
- if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
- reset_active(rq, engine);
-
/*
* If this is part of a virtual engine, its next request may
* have been blocked waiting for access to the active context.
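
Most of the intel_lrc.c churn is a straight code move: restore_default_state() and reset_active() are hoisted to sit before __execlists_schedule_in(), and the banned-context check moves with them from schedule-out to schedule-in. As the new comment explains, __execlists_schedule_out() is reached from process_csb() without engine->active.lock held and can race with schedule-in, so the heavyweight scrub is only safe on the submission side. In rough outline, with hypothetical my_* names (the real helpers carry considerably more state):

    /* Hypothetical outline of the relocated ban check; not the i915 API. */
    struct my_engine;
    struct my_context;
    struct my_request { struct my_context *context; };

    bool my_context_is_banned(const struct my_context *ce);
    void my_reset_active(struct my_request *rq, struct my_engine *engine);

    static void my_schedule_in(struct my_request *rq, struct my_engine *engine)
    {
            /* Runs under engine->active.lock, serialised against submission. */
            if (my_context_is_banned(rq->context))
                    my_reset_active(rq, engine);    /* rewind ring, scrub image */

            /* ... tag the context and write it into the ELSP ports ... */
    }

    static void my_schedule_out(struct my_request *rq, struct my_engine *engine)
    {
            /*
             * Reached from the CSB worker without engine->active.lock, so
             * only trivial accounting and reference drops belong here.
             */
    }

reset_active() itself rewinds ce->ring->head to the start of the first incomplete request (so a resubmission cannot land mid-batch), copies the engine's default register state back over the context image, and sets CTX_DESC_FORCE_RESTORE so the hardware reloads the scrubbed state.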