-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 23
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 2
-rw-r--r-- | drivers/gpu/drm/i915/gt/intel_reset.c | 6
-rw-r--r-- | drivers/gpu/drm/i915/gt/intel_ring.c | 24
-rw-r--r-- | drivers/gpu/drm/i915/gt/intel_ring.h | 1
-rw-r--r-- | drivers/gpu/drm/i915/gt/intel_ring_submission.c | 30
-rw-r--r-- | drivers/gpu/drm/i915/gt/selftest_migrate.c | 2
-rw-r--r-- | drivers/gpu/drm/i915/gt/selftest_rc6.c | 17
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 2
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_huc.c | 13
-rw-r--r-- | drivers/gpu/drm/i915/gt/uc/intel_huc.h | 1
-rw-r--r-- | drivers/gpu/drm/i915/i915_active.c | 18
-rw-r--r-- | drivers/gpu/drm/i915/i915_active.h | 1
13 files changed, 39 insertions, 101 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3198b64ad7db..388f90784d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -53,29 +53,6 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 }
 
 /**
- * __i915_gem_object_is_lmem - Whether the object is resident in
- * lmem while in the fence signaling critical path.
- * @obj: The object to check.
- *
- * This function is intended to be called from within the fence signaling
- * path where the fence, or a pin, keeps the object from being migrated. For
- * example during gpu reset or similar.
- *
- * Return: Whether the object is resident in lmem.
- */
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
-{
-	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-#ifdef CONFIG_LOCKDEP
-	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
-		    i915_gem_object_evictable(obj));
-#endif
-	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
-		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
-/**
  * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
  * minimum page size for the backing pages.
  * @i915: The i915 instance.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 5a7a14e85c3f..ecd8f1a633a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -19,8 +19,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
 
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
				      const void *data, size_t size);
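With the __ variant removed, a caller on the fence-signaling path is expected to hold a pin (or rely on the unsignaled fence) to keep the object from migrating, and to query placement through the remaining helper. A minimal caller sketch under that assumption; use_lmem_path() and use_smem_path() are hypothetical stand-ins:

	/* Only valid while a pin or an unsignaled fence prevents obj
	 * from being migrated out of local memory. */
	if (i915_gem_object_is_lmem(obj))
		use_lmem_path(obj);	/* hypothetical helper */
	else
		use_smem_path(obj);	/* hypothetical helper */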
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c2fe3fc78e76..aae5a081cb53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1113,6 +1113,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
	 * Warn CI about the unrecoverable wedged condition.
	 * Time for a reboot.
	 */
+	gt_err(gt, "Unrecoverable wedged condition\n");
	add_taint_for_CI(gt->i915, TAINT_WARN);
	return false;
 }
@@ -1264,8 +1265,10 @@ void intel_gt_reset(struct intel_gt *gt,
	}
 
	ret = resume(gt);
-	if (ret)
+	if (ret) {
+		gt_err(gt, "Failed to resume (%d)\n", ret);
		goto taint;
+	}
 
 finish:
	reset_finish(gt, awake);
@@ -1608,6 +1611,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
 
	/* Wedged on init is non-recoverable */
+	gt_err(gt, "Non-recoverable wedged on init\n");
	add_taint_for_CI(gt->i915, TAINT_WARN);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 59da4b7bd262..b74d9205c0f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -308,30 +308,6 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
	return cs;
 }
 
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
-	int num_dwords;
-	void *cs;
-
-	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
-	if (num_dwords == 0)
-		return 0;
-
-	num_dwords = CACHELINE_DWORDS - num_dwords;
-	GEM_BUG_ON(num_dwords & 1);
-
-	cs = intel_ring_begin(rq, num_dwords);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
-	intel_ring_advance(rq, cs + num_dwords);
-
-	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
-	return 0;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_ring.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 1b32dadfb8c3..64b322e25f36 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -16,7 +16,6 @@ struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
 
 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
-int intel_ring_cacheline_align(struct i915_request *rq);
 
 unsigned int intel_ring_update_space(struct intel_ring *ring);
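The xcs_resume() change below stretches a bounded busy-wait from 2ms to 50ms. For reference, a minimal sketch of the ktime-based poll pattern it relies on; ring_is_idle() is a hypothetical stand-in for the real RING_CTL/head/tail checks:

	/* Poll for up to 50ms, easing bus pressure between reads. */
	ktime_t timeout = ktime_get() + 50 * NSEC_PER_MSEC;

	while (ktime_before(ktime_get(), timeout)) {
		if (ring_is_idle(engine))	/* hypothetical condition */
			break;
		cpu_relax();
	}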
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 32f3b52a183a..458e29d89978 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -26,6 +26,7 @@
 #include "shmem_utils.h"
 #include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
+#include "intel_gt_print.h"
 
 /* Rough estimate of the typical request size, performing a flush,
  * set-context and then emitting the batch.
@@ -230,8 +231,13 @@ static int xcs_resume(struct intel_engine_cs *engine)
 
	set_pp_dir(engine);
 
-	/* First wake the ring up to an empty/idle ring */
-	for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC);
+	/*
+	 * First wake the ring up to an empty/idle ring.
+	 * Use 50ms of delay to let the engine write successfully
+	 * for all platforms. Experimented with different values and
+	 * determined that 50ms works best based on testing.
+	 */
+	for ((kt) = ktime_get() + (50 * NSEC_PER_MSEC);
	     ktime_before(ktime_get(), (kt)); cpu_relax()) {
		/*
		 * In case of resets fails because engine resumes from
@@ -282,16 +288,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
	return 0;
 
 err:
-	drm_err(&engine->i915->drm,
-		"%s initialization failed; "
-		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
-		engine->name,
-		ENGINE_READ(engine, RING_CTL),
-		ENGINE_READ(engine, RING_CTL) & RING_VALID,
-		ENGINE_READ(engine, RING_HEAD), ring->head,
-		ENGINE_READ(engine, RING_TAIL), ring->tail,
-		ENGINE_READ(engine, RING_START),
-		i915_ggtt_offset(ring->vma));
+	gt_err(engine->gt, "%s initialization failed\n", engine->name);
+	ENGINE_TRACE(engine,
+		     "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+		     ENGINE_READ(engine, RING_CTL),
+		     ENGINE_READ(engine, RING_CTL) & RING_VALID,
+		     ENGINE_READ(engine, RING_HEAD), ring->head,
+		     ENGINE_READ(engine, RING_TAIL), ring->tail,
+		     ENGINE_READ(engine, RING_START),
+		     i915_ggtt_offset(ring->vma));
+	GEM_TRACE_DUMP();
 
	return -EIO;
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index ca460cee4f8b..1bf7b88d9a9d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -262,7 +262,7 @@ static int clear(struct intel_migrate *migrate,
 {
	struct drm_i915_private *i915 = migrate->context->engine->i915;
	struct drm_i915_gem_object *obj;
-	struct i915_request *rq;
+	struct i915_request *rq = NULL;
	struct i915_gem_ww_ctx ww;
	u32 *vaddr, val = 0;
	bool ccs_cap = false;
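The clear() change above makes rq start out NULL so that error paths which unwind before a request exists can test the pointer instead of dereferencing garbage. The shape of the pattern, sketched with a hypothetical setup_request() step:

	struct i915_request *rq = NULL;
	int err;

	err = setup_request(&rq);	/* hypothetical; may fail before rq is assigned */
	if (err)
		goto out;
	/* ... use rq ... */
out:
	if (rq)				/* still NULL if setup never succeeded */
		i915_request_put(rq);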
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 1aa1446c8fb0..27b6d51ef145 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -8,6 +8,7 @@
 #include "intel_gpu_commands.h"
 #include "intel_gt_requests.h"
 #include "intel_ring.h"
+#include "intel_rps.h"
 #include "selftest_rc6.h"
 
 #include "selftests/i915_random.h"
@@ -38,6 +39,9 @@ int live_rc6_manual(void *arg)
	ktime_t dt;
	u64 res[2];
	int err = 0;
+	u32 rc0_freq = 0;
+	u32 rc6_freq = 0;
+	struct intel_rps *rps = &gt->rps;
 
	/*
	 * Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -66,6 +70,7 @@ int live_rc6_manual(void *arg)
	rc0_power = librapl_energy_uJ() - rc0_power;
	dt = ktime_sub(ktime_get(), dt);
	res[1] = rc6_residency(rc6);
+	rc0_freq = intel_rps_read_actual_frequency_fw(rps);
	if ((res[1] - res[0]) >> 10) {
		pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
		       (res[1] - res[0]) >> 10);
@@ -77,7 +82,11 @@ int live_rc6_manual(void *arg)
	rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
	if (!rc0_power) {
-		pr_err("No power measured while in RC0\n");
+		if (rc0_freq)
+			pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+				 rc0_freq);
+		else
+			pr_err("No power and freq measured while in RC0\n");
		err = -EINVAL;
		goto out_unlock;
	}
@@ -90,7 +99,8 @@ int live_rc6_manual(void *arg)
	intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
	dt = ktime_get();
	rc6_power = librapl_energy_uJ();
-	msleep(100);
+	msleep(1000);
+	rc6_freq = intel_rps_read_actual_frequency_fw(rps);
	rc6_power = librapl_energy_uJ() - rc6_power;
	dt = ktime_sub(ktime_get(), dt);
	res[1] = rc6_residency(rc6);
@@ -108,7 +118,8 @@ int live_rc6_manual(void *arg)
	pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
		rc0_power, rc6_power);
	if (2 * rc6_power > rc0_power) {
-		pr_err("GPU leaked energy while in RC6!\n");
+		pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
+		       rc6_freq, rc0_freq);
		err = -EINVAL;
		goto out_unlock;
	}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 222c95f62156..e8a04e476c57 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -18,7 +18,7 @@
 #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_2M
 #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_16M
 #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#elif IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_1M
 #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_2M
 #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d7ac31c3254c..b3cbf85c00cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -427,19 +427,6 @@ void intel_huc_fini(struct intel_huc *huc)
		intel_uc_fw_fini(&huc->fw);
 }
 
-void intel_huc_suspend(struct intel_huc *huc)
-{
-	if (!intel_uc_fw_is_loadable(&huc->fw))
-		return;
-
-	/*
-	 * in the unlikely case that we're suspending before the GSC has
-	 * completed its loading sequence, just stop waiting. We'll restart
-	 * on resume.
-	 */
-	delayed_huc_load_complete(huc);
-}
-
 static const char *auth_mode_string(struct intel_huc *huc,
				    enum intel_huc_authentication_type type)
 {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index ba5cb08e9e7b..d5e441b9e08d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,7 +57,6 @@ int intel_huc_sanitize(struct intel_huc *huc);
 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_init(struct intel_huc *huc);
 void intel_huc_fini(struct intel_huc *huc);
-void intel_huc_suspend(struct intel_huc *huc);
 int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
 int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
				     enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 35319228bc51..0dbc4e289300 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -527,24 +527,6 @@ int i915_active_acquire(struct i915_active *ref)
	return err;
 }
 
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
-{
-	struct i915_active_fence *active;
-	int err;
-
-	err = i915_active_acquire(ref);
-	if (err)
-		return err;
-
-	active = active_instance(ref, idx);
-	if (!active) {
-		i915_active_release(ref);
-		return -ENOMEM;
-	}
-
-	return 0; /* return with active ref */
-}
-
 void i915_active_release(struct i915_active *ref)
 {
	debug_active_assert(ref);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 77c676ecc263..821f7c21ea9b 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -186,7 +186,6 @@ int i915_request_await_active(struct i915_request *rq,
 #define I915_ACTIVE_AWAIT_BARRIER	BIT(2)
 
 int i915_active_acquire(struct i915_active *ref);
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
 void i915_active_release(struct i915_active *ref);
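On the intel_guc_log.c one-liner: defined(CONFIG_FOO) is true only when the option is built in (=y), while IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> also evaluates true for the modular case (=m) and is the preferred kernel idiom for preprocessor config tests; since CONFIG_DRM_I915_DEBUG_GEM is a bool, the switch here is about idiom and robustness rather than behavior. An illustration with a hypothetical CONFIG_EXAMPLE option:

	#include <linux/kconfig.h>

	/* IS_ENABLED() expands to 1 for CONFIG_EXAMPLE=y or =m, else 0 */
	#if IS_ENABLED(CONFIG_EXAMPLE)
	#define EXAMPLE_BUFFER_SIZE	SZ_2M	/* debug-sized buffer */
	#else
	#define EXAMPLE_BUFFER_SIZE	SZ_8K	/* default buffer */
	#endif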