From ab98e94435abc493c8fedf5e07b0b3f045424d32 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 8 Feb 2019 22:05:27 +0200 Subject: drm/i915: Dump skl+ watermark changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we're only dumping out the ddb allocation changes, let's do the same for the watermarks. This should help with debugging underruns and whatnot. First I tried one line per plane per wm level, but that resulted in an obnoxious amount of lines printed. So as a compromise I settled on a four line format, each line containing a single watermark related value (enable,lines,blocks,min_ddb_alloc) for all 8 levels (+trans wm). It still produces quite a lot of output but I can't really see a way around that because we simply have a lot of data to dump. Let's also pimp the ddb debug to print the size of the allocations too, not just their bounds. Makes it a bit easier to compare against the watermarks. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190208200527.12844-1-ville.syrjala@linux.intel.com Reviewed-by: Clint Taylor --- drivers/gpu/drm/i915/intel_pm.c | 86 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 83 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0f15685529a0..279031502d43 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5266,6 +5266,11 @@ skl_compute_ddb(struct intel_atomic_state *state) return 0; } +static char enast(bool enable) +{ + return enable ? '*' : ' '; +} + static void skl_print_wm_changes(struct intel_atomic_state *state) { @@ -5276,8 +5281,16 @@ skl_print_wm_changes(struct intel_atomic_state *state) struct intel_crtc *crtc; int i; + if ((drm_debug & DRM_UT_KMS) == 0) + return; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; + + old_pipe_wm = &old_crtc_state->wm.skl.optimal; + new_pipe_wm = &new_crtc_state->wm.skl.optimal; + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_ddb_entry *old, *new; @@ -5288,10 +5301,77 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", + DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + } + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_plane_wm *old_wm, *new_wm; + + old_wm = &old_pipe_wm->planes[plane_id]; + new_wm = &new_pipe_wm->planes[plane_id]; + + if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) + continue; + + DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), + enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), + enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), + enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), + enast(old_wm->trans_wm.plane_en), + enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), + 
enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), + enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), + enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), + enast(new_wm->trans_wm.plane_en)); + + DRM_DEBUG_KMS("[PLANE:%d:%s] lines %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].plane_res_l, old_wm->wm[1].plane_res_l, + old_wm->wm[2].plane_res_l, old_wm->wm[3].plane_res_l, + old_wm->wm[4].plane_res_l, old_wm->wm[5].plane_res_l, + old_wm->wm[6].plane_res_l, old_wm->wm[7].plane_res_l, + old_wm->trans_wm.plane_res_l, + new_wm->wm[0].plane_res_l, new_wm->wm[1].plane_res_l, + new_wm->wm[2].plane_res_l, new_wm->wm[3].plane_res_l, + new_wm->wm[4].plane_res_l, new_wm->wm[5].plane_res_l, + new_wm->wm[6].plane_res_l, new_wm->wm[7].plane_res_l, + new_wm->trans_wm.plane_res_l); + + DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, + old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, + old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, + old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, + old_wm->trans_wm.plane_res_b, + new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, + new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, + new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, + new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, + new_wm->trans_wm.plane_res_b); + + DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", plane->base.base.id, plane->base.name, - old->start, old->end, - new->start, new->end); + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc); } } } -- cgit v1.2.3 From 62eb3c24b37cb5d1c9dbf65f619a02b24643b229 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Feb 2019 09:25:04 +0000 Subject: drm/i915: Apply rps waitboosting for dma_fence_wait_timeout() As time goes by, usage of generic ioctls such as drm_syncobj and sync_file are on the increase bypassing i915-specific ioctls like GEM_WAIT. Currently, we only apply waitboosting to our driver ioctls as we track the file/client and account the waitboosting to them. However, since commit 7b92c1bd0540 ("drm/i915: Avoid keeping waitboost active for signaling threads"), we no longer have been applying the client ratelimiting on waitboosts and so that information has only been used for debug tracking. Push the application of waitboosting down to the common i915_request_wait, and apply it to all foreign fence waits as well. 
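In short, the boost now lives in the common wait path and the generic fence wait opts in via I915_WAIT_PRIORITY. Condensed from the hunks below (a sketch of the resulting flow, not a literal copy of the code):

    /* All foreign dma_fence waits (sync_file, drm_syncobj, ...) funnel here. */
    static signed long i915_fence_wait(struct dma_fence *fence,
                                       bool interruptible,
                                       signed long timeout)
    {
            /* Ask for priority bumping, which now also implies waitboosting. */
            return i915_request_wait(to_request(fence),
                                     interruptible | I915_WAIT_PRIORITY,
                                     timeout);
    }

    /* ...and i915_request_wait() applies the boost once per unstarted request: */
    if (flags & I915_WAIT_PRIORITY) {
            if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
                    gen6_rps_boost(rq);
            i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
    }
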
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Eero Tamminen Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190213092504.25709-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 19 +------- drivers/gpu/drm/i915/i915_drv.h | 7 +-- drivers/gpu/drm/i915/i915_gem.c | 86 ++++++++---------------------------- drivers/gpu/drm/i915/i915_request.c | 21 ++++++++- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_pm.c | 5 +-- 7 files changed, 44 insertions(+), 98 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4c4876967cd6..ca8fa4461fc9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2019,11 +2019,9 @@ static const char *rps_power_to_str(unsigned int power) static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; struct intel_rps *rps = &dev_priv->gt_pm.rps; u32 act_freq = rps->cur_freq; intel_wakeref_t wakeref; - struct drm_file *file; with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -2057,22 +2055,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_gpu_freq(dev_priv, rps->efficient_freq), intel_gpu_freq(dev_priv, rps->boost_freq)); - mutex_lock(&dev->filelist_mutex); - list_for_each_entry_reverse(file, &dev->filelist, lhead) { - struct drm_i915_file_private *file_priv = file->driver_priv; - struct task_struct *task; - - rcu_read_lock(); - task = pid_task(file->pid, PIDTYPE_PID); - seq_printf(m, "%s [%d]: %d boosts\n", - task ? task->comm : "", - task ? task->pid : -1, - atomic_read(&file_priv->rps_client.boosts)); - rcu_read_unlock(); - } - seq_printf(m, "Kernel (anonymous) boosts: %d\n", - atomic_read(&rps->boosts)); - mutex_unlock(&dev->filelist_mutex); + seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 380b994fe5dc..17fe942eaafa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -217,10 +217,6 @@ struct drm_i915_file_private { } mm; struct idr context_idr; - struct intel_rps_client { - atomic_t boosts; - } rps_client; - unsigned int bsd_engine; /* @@ -3056,8 +3052,7 @@ void i915_gem_resume(struct drm_i915_private *dev_priv); vm_fault_t i915_gem_fault(struct vm_fault *vmf); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, - long timeout, - struct intel_rps_client *rps); + long timeout); int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bf46c52229a8..5c1b9d44b7d3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -416,8 +416,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) static long i915_gem_object_wait_fence(struct dma_fence *fence, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { struct i915_request *rq; @@ -435,27 +434,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence, if (i915_request_completed(rq)) goto out; - /* - * This client is about to stall waiting for the GPU. 
In many cases - * this is undesirable and limits the throughput of the system, as - * many clients cannot continue processing user input/output whilst - * blocked. RPS autotuning may take tens of milliseconds to respond - * to the GPU load and thus incurs additional latency for the client. - * We can circumvent that by promoting the GPU frequency to maximum - * before we wait. This makes the GPU throttle up much more quickly - * (good for benchmarks and user experience, e.g. window animations), - * but at a cost of spending more power processing the workload - * (bad for battery). Not all clients even want their results - * immediately and for them we should just let the GPU select its own - * frequency to maximise efficiency. To prevent a single client from - * forcing the clocks too high for the whole system, we only allow - * each client to waitboost once in a busy period. - */ - if (rps_client && !i915_request_started(rq)) { - if (INTEL_GEN(rq->i915) >= 6) - gen6_rps_boost(rq, rps_client); - } - timeout = i915_request_wait(rq, flags, timeout); out: @@ -468,8 +446,7 @@ out: static long i915_gem_object_wait_reservation(struct reservation_object *resv, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { unsigned int seq = __read_seqcount_begin(&resv->seq); struct dma_fence *excl; @@ -487,8 +464,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, for (i = 0; i < count; i++) { timeout = i915_gem_object_wait_fence(shared[i], - flags, timeout, - rps_client); + flags, timeout); if (timeout < 0) break; @@ -514,8 +490,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, } if (excl && timeout >= 0) - timeout = i915_gem_object_wait_fence(excl, flags, timeout, - rps_client); + timeout = i915_gem_object_wait_fence(excl, flags, timeout); dma_fence_put(excl); @@ -609,30 +584,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, * @obj: i915 gem object * @flags: how to wait (under a lock, for all rendering or just for writes etc) * @timeout: how long to wait - * @rps_client: client (user process) to charge for any waitboosting */ int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { might_sleep(); GEM_BUG_ON(timeout < 0); - timeout = i915_gem_object_wait_reservation(obj->resv, - flags, timeout, - rps_client); + timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout); return timeout < 0 ? 
timeout : 0; } -static struct intel_rps_client *to_rps_client(struct drm_file *file) -{ - struct drm_i915_file_private *fpriv = file->driver_priv; - - return &fpriv->rps_client; -} - static int i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -838,8 +802,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -891,8 +854,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -1154,8 +1116,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -1454,8 +1415,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto err; @@ -1553,8 +1513,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | (write_domain ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (err) goto out; @@ -1863,8 +1822,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) */ ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) goto err; @@ -3195,8 +3153,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | I915_WAIT_ALL, - to_wait_timeout(args->timeout_ns), - to_rps_client(file)); + to_wait_timeout(args->timeout_ns)); if (args->timeout_ns > 0) { args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); @@ -3265,7 +3222,7 @@ wait_for_timelines(struct drm_i915_private *i915, * stalls, so allow the gpu to boost to maximum clocks. */ if (flags & I915_WAIT_FOR_IDLE_BOOST) - gen6_rps_boost(rq, NULL); + gen6_rps_boost(rq); timeout = i915_request_wait(rq, flags, timeout); i915_request_put(rq); @@ -3360,8 +3317,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3423,8 +3379,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3539,8 +3494,7 @@ restart: I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3678,8 +3632,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -3805,8 +3758,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? 
I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c2a5c48c7541..0acd6baa3c88 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -68,7 +68,9 @@ static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout) { - return i915_request_wait(to_request(fence), interruptible, timeout); + return i915_request_wait(to_request(fence), + interruptible | I915_WAIT_PRIORITY, + timeout); } static void i915_fence_release(struct dma_fence *fence) @@ -1136,8 +1138,23 @@ long i915_request_wait(struct i915_request *rq, if (__i915_spin_request(rq, state, 5)) goto out; - if (flags & I915_WAIT_PRIORITY) + /* + * This client is about to stall waiting for the GPU. In many cases + * this is undesirable and limits the throughput of the system, as + * many clients cannot continue processing user input/output whilst + * blocked. RPS autotuning may take tens of milliseconds to respond + * to the GPU load and thus incurs additional latency for the client. + * We can circumvent that by promoting the GPU frequency to maximum + * before we sleep. This makes the GPU throttle up much more quickly + * (good for benchmarks and user experience, e.g. window animations), + * but at a cost of spending more power processing the workload + * (bad for battery). + */ + if (flags & I915_WAIT_PRIORITY) { + if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6) + gen6_rps_boost(rq); i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); + } wait.tsk = current; if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c496b6e0226b..59544bb5c294 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13559,7 +13559,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait, * vblank without our intervention, so leave RPS alone. 
*/ if (!i915_request_started(rq)) - gen6_rps_boost(rq, NULL); + gen6_rps_boost(rq); i915_request_put(rq); drm_crtc_vblank_put(wait->crtc); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index dd121966613b..48e89db23c5b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -2266,7 +2266,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps); +void gen6_rps_boost(struct i915_request *rq); void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 279031502d43..af265d831011 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6768,8 +6768,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->pcu_lock); } -void gen6_rps_boost(struct i915_request *rq, - struct intel_rps_client *rps_client) +void gen6_rps_boost(struct i915_request *rq) { struct intel_rps *rps = &rq->i915->gt_pm.rps; unsigned long flags; @@ -6798,7 +6797,7 @@ void gen6_rps_boost(struct i915_request *rq, if (READ_ONCE(rps->cur_freq) < rps->boost_freq) schedule_work(&rps->work); - atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts); + atomic_inc(&rps->boosts); } int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) -- cgit v1.2.3 From c11b813f53c98e35ed257621065d6905589f78b1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 29 Nov 2018 19:55:03 +0200 Subject: drm/i915: s/PUNIT_REG_DSPFREQ/PUNIT_REG_DSPSSPM/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename the punit display power register to match the spec. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20181129175504.3630-1-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/intel_cdclk.c | 14 +++++++------- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- drivers/gpu/drm/i915/intel_runtime_pm.c | 12 ++++++------ 4 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 668e862d5ddc..1a07bce0bd22 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1044,7 +1044,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* See configdb bunit SB addr map */ #define BUNIT_REG_BISOC 0x11 -#define PUNIT_REG_DSPFREQ 0x36 +#define PUNIT_REG_DSPSSPM 0x36 #define DSPFREQSTAT_SHIFT_CHV 24 #define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) #define DSPFREQGUAR_SHIFT_CHV 8 diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 15ba950dee00..26e01a8465af 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -468,7 +468,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv, cdclk_state->vco); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); mutex_unlock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv)) @@ -543,11 +543,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); @@ -624,11 +624,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index af265d831011..7745ce20a6cd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -335,12 +335,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (enable) val |= DSP_MAXFIFO_PM5_ENABLE; else val &= ~DSP_MAXFIFO_PM5_ENABLE; - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); mutex_unlock(&dev_priv->pcu_lock); } @@ -6063,7 
+6063,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (val & DSP_MAXFIFO_PM5_ENABLE) wm->level = VLV_WM_LEVEL_PM5; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index a017a4232c0f..2d8673150c44 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1760,7 +1760,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->pcu_lock); - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); /* * We only ever set the power-on and power-gate states, anything * else is unexpected. @@ -1772,7 +1772,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, * A transient state at this point would mean some unexpected party * is poking at the power controls too. */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); WARN_ON(ctrl << 16 != state); mutex_unlock(&dev_priv->pcu_lock); @@ -1793,20 +1793,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->pcu_lock); #define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) if (COND) goto out; - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); ctrl &= ~DP_SSC_MASK(pipe); ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); if (wait_for(COND, 100)) DRM_ERROR("timeout setting power well state %08x (%08x)\n", state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); #undef COND -- cgit v1.2.3 From 2ed8e1f560e517baca4763204edbf76255c8e54e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 13 Feb 2019 18:54:23 +0200 Subject: drm/i915: Include "ignore lines" in skl+ wm state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We'll need to poke at the "ignore lines" bit in the skl+ watermark registers for a w/a. Include that bit in the wm state. 
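Concretely, the new field mirrors bit 30 of the PLANE_WM/CUR_WM registers; the readout and write-out sides of the change reduce to (condensed from the hunks below):

    #define PLANE_WM_EN            (1 << 31)
    #define PLANE_WM_IGNORE_LINES  (1 << 30)

    /* readout: register value -> software wm state */
    level->plane_en     = val & PLANE_WM_EN;
    level->ignore_lines = val & PLANE_WM_IGNORE_LINES;

    /* write-out: software wm state -> register value */
    if (level->plane_en)
            val |= PLANE_WM_EN;
    if (level->ignore_lines)
            val |= PLANE_WM_IGNORE_LINES;
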
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190213165424.22904-2-ville.syrjala@linux.intel.com Reviewed-by: Clint Taylor --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 44 ++++++++++++++++++++++++++--------------- 3 files changed, 30 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 17fe942eaafa..5c8d0489a1cd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1126,6 +1126,7 @@ struct skl_wm_level { u16 plane_res_b; u8 plane_res_l; bool plane_en; + bool ignore_lines; }; /* Stores plane specific WM parameters */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2ee810d466ad..a5a47369cbd5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6032,6 +6032,7 @@ enum { #define _CUR_WM_TRANS_A_0 0x70168 #define _CUR_WM_TRANS_B_0 0x71168 #define PLANE_WM_EN (1 << 31) +#define PLANE_WM_IGNORE_LINES (1 << 30) #define PLANE_WM_LINES_SHIFT 14 #define PLANE_WM_LINES_MASK 0x1f #define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7745ce20a6cd..9485645a41b0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5053,11 +5053,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, { u32 val = 0; - if (level->plane_en) { + if (level->plane_en) val |= PLANE_WM_EN; - val |= level->plane_res_b; - val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; - } + if (level->ignore_lines) + val |= PLANE_WM_IGNORE_LINES; + val |= level->plane_res_b; + val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; I915_WRITE_FW(reg, val); } @@ -5123,6 +5124,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2) { return l1->plane_en == l2->plane_en && + l1->ignore_lines == l2->ignore_lines && l1->plane_res_l == l2->plane_res_l && l1->plane_res_b == l2->plane_res_b; } @@ -5331,19 +5333,28 @@ skl_print_wm_changes(struct intel_atomic_state *state) enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), enast(new_wm->trans_wm.plane_en)); - DRM_DEBUG_KMS("[PLANE:%d:%s] lines %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", plane->base.base.id, plane->base.name, - old_wm->wm[0].plane_res_l, old_wm->wm[1].plane_res_l, - old_wm->wm[2].plane_res_l, old_wm->wm[3].plane_res_l, - old_wm->wm[4].plane_res_l, old_wm->wm[5].plane_res_l, - old_wm->wm[6].plane_res_l, old_wm->wm[7].plane_res_l, - old_wm->trans_wm.plane_res_l, - new_wm->wm[0].plane_res_l, new_wm->wm[1].plane_res_l, - new_wm->wm[2].plane_res_l, new_wm->wm[3].plane_res_l, - new_wm->wm[4].plane_res_l, new_wm->wm[5].plane_res_l, - new_wm->wm[6].plane_res_l, new_wm->wm[7].plane_res_l, - new_wm->trans_wm.plane_res_l); + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, + 
enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, + + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", @@ -5686,6 +5697,7 @@ static inline void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { level->plane_en = val & PLANE_WM_EN; + level->ignore_lines = val & PLANE_WM_IGNORE_LINES; level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK; -- cgit v1.2.3 From 290248c27c93ad70262b8112595b95ad9d867929 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 13 Feb 2019 18:54:24 +0200 Subject: drm/i915: Implement new w/a for underruns with wm1+ disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The new workaround from the hw team involves leaving WM1 still disabled but programming the blocks value identically to WM0, and we also need to set the "ignore lines watermark" bit for WM1. v2: Fix commit message wording a bit Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190213165424.22904-3-ville.syrjala@linux.intel.com Reviewed-by: Clint Taylor --- drivers/gpu/drm/i915/intel_pm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9485645a41b0..c7ec9b169046 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4463,6 +4463,13 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, for_each_plane_id_on_crtc(intel_crtc, plane_id) { wm = &cstate->wm.skl.optimal.planes[plane_id]; memset(&wm->wm[level], 0, sizeof(wm->wm[level])); + + /* W/A for underruns with WM1+ disabled */ + if (IS_ICELAKE(dev_priv) && + level == 1 && wm->wm[0].plane_en) { + wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; + wm->wm[level].ignore_lines = true; + } } } -- cgit v1.2.3 From c384afe35200f090b10ff5b4e8c7e6ea6a54eb19 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 28 Feb 2019 19:36:39 +0200 Subject: drm/i915: Finalize Wa_1408961008:icl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The icl wm1+ underrun w/a has been added to the spec. It changed slightly from the previous incarnation by requiring that we mirror the lines watermark and the ignore lines bit from WM0 into WM1. 
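The net effect on the icl workaround is to copy all three WM0 fields into the still-disabled WM1. Condensed from the hunk below:

    /*
     * Wa_1408961008:icl
     * Underruns with WM1+ disabled
     */
    if (IS_ICELAKE(dev_priv) && level == 1 && wm->wm[0].plane_en) {
            wm->wm[level].plane_res_b  = wm->wm[0].plane_res_b;
            wm->wm[level].plane_res_l  = wm->wm[0].plane_res_l;
            wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
            /* plane_en for WM1 deliberately stays false */
    }
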
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190228173639.18422-1-ville.syrjala@linux.intel.com Reviewed-by: Matt Roper Tested-by: Clint Taylor --- drivers/gpu/drm/i915/intel_pm.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4c0e43caa5cd..9c97a95c1816 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4467,11 +4467,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, wm = &cstate->wm.skl.optimal.planes[plane_id]; memset(&wm->wm[level], 0, sizeof(wm->wm[level])); - /* W/A for underruns with WM1+ disabled */ + /* + * Wa_1408961008:icl + * Underruns with WM1+ disabled + */ if (IS_ICELAKE(dev_priv) && level == 1 && wm->wm[0].plane_en) { wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; - wm->wm[level].ignore_lines = true; + wm->wm[level].plane_res_l = wm->wm[0].plane_res_l; + wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; } } } -- cgit v1.2.3 From 209d73530d7effed2acf7e5b88ea5cf8c73a800b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 7 Mar 2019 12:32:35 +0200 Subject: drm/i915/icl: Prevent incorrect DBuf enabling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pretend that we have only 1 DBuf slice and that 1 slice is always enabled, until we have a proper way for on-demand toggling of the second slice. Currently we'll try to incorrectly enable DBuf even when all pipes are disabled and we are already runtime suspended (as the computed number of DBuf slices will be 1 in that case). This also means we'll leave the second slice enabled redundantly (except when suspended), but that's an acceptable tradeoff until we have a proper solution. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108756 Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190307103235.23538-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 7 ++++++- drivers/gpu/drm/i915/intel_runtime_pm.c | 12 ++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/intel_pm.c') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9c97a95c1816..bece16ae6d15 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3624,7 +3624,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 11) return enabled_slices; - if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) + /* + * FIXME: for now we'll only ever use 1 slice; pretend that we have + * only that 1 slice enabled until we have a proper way for on-demand + * toggling of the second slice. + */ + if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) enabled_slices++; return enabled_slices; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index aa974b11928a..676a89bb8194 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3576,7 +3576,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv) !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) DRM_ERROR("DBuf power enable timeout\n"); else - dev_priv->wm.skl_hw.ddb.enabled_slices = 2; + /* + * FIXME: for now pretend that we only have 1 slice, see + * intel_enabled_dbuf_slices_num(). 
+ */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; } static void icl_dbuf_disable(struct drm_i915_private *dev_priv) @@ -3591,7 +3595,11 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv) (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) DRM_ERROR("DBuf power disable timeout!\n"); else - dev_priv->wm.skl_hw.ddb.enabled_slices = 0; + /* + * FIXME: for now pretend that the first slice is always + * enabled, see intel_enabled_dbuf_slices_num(). + */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; } static void icl_mbus_init(struct drm_i915_private *dev_priv) -- cgit v1.2.3
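With the if (0 && ...) guard in place, the helper effectively reports a single enabled slice regardless of the DBUF_CTL_S2 power state, which keeps it consistent with the value forced in icl_dbuf_enable()/icl_dbuf_disable(). Roughly (a condensed reading of the patch, not the literal code):

    static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
    {
            u8 enabled_slices = 1; /* slice 1 is assumed to be always enabled */

            if (INTEL_GEN(dev_priv) < 11)
                    return enabled_slices;

            /* FIXME: second-slice check disabled until on-demand toggling exists */
            return enabled_slices;
    }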