Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1100
 1 file changed, 416 insertions(+), 684 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd1157..b6c886ac901b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
@@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- new_val = dev_priv->pm_irq_mask;
+ new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ if (new_val != dev_priv->pm_imr) {
+ dev_priv->pm_imr = new_val;
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, mask);
}
-static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
- __gen6_disable_pm_irq(dev_priv, mask);
+ __gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(reg, dev_priv->pm_rps_events);
- I915_WRITE(reg, dev_priv->pm_rps_events);
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, reset_mask);
+ I915_WRITE(reg, reset_mask);
POSTING_READ(reg);
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier |= enable_mask;
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ gen6_unmask_pm_irq(dev_priv, enable_mask);
+ /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier &= ~disable_mask;
+ __gen6_mask_pm_irq(dev_priv, disable_mask);
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ /* though a barrier is missing here, we don't really need one */
+}
+
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
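
The write ordering in the two helpers above is deliberate: gen6_enable_pm_irq() writes IER before unmasking IMR, while gen6_disable_pm_irq() masks IMR before shrinking IER. A stand-alone sketch (an illustrative model, not driver code) of why that ordering is safe, treating an event as pending only when its bit is set in IIR, enabled in IER, and unmasked in IMR:

#include <stdint.h>
#include <stdio.h>

static uint32_t ier, imr = 0xffffffff, iir;

static int irq_pending(void) { return (iir & ier & ~imr) != 0; }

static void enable_pm_irq(uint32_t mask)
{
	ier |= mask;	/* enable first... */
	imr &= ~mask;	/* ...then unmask, so a new event fires at once */
}

static void disable_pm_irq(uint32_t mask)
{
	imr |= mask;	/* mask first, so nothing fires mid-teardown... */
	ier &= ~mask;	/* ...then disable; no barrier needed after this */
}

int main(void)
{
	enable_pm_irq(1u << 5);
	iir |= 1u << 5;				/* event arrives */
	printf("pending: %d\n", irq_pending());	/* 1 */
	disable_pm_irq(1u << 5);
	printf("pending: %d\n", irq_pending());	/* 0 */
	return 0;
}
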
@@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
- dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
- ~dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts_enabled) {
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+ dev_priv->pm_guc_events);
+ dev_priv->guc.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts_enabled = false;
+
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen9_reset_guc_interrupts(dev_priv);
+}
+
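
gen9_disable_guc_interrupts() encodes a teardown ordering worth spelling out: disable under irq_lock, synchronize_irq() to drain a concurrent handler, then reset IIR. A hypothetical caller (the function name is invented for illustration, not part of this patch) would rely on it like so:

/* Hypothetical caller; intel_guc_log_teardown() is an invented name. */
static void intel_guc_log_teardown(struct drm_i915_private *dev_priv)
{
	/* After this returns, no handler can queue new flush work:
	 * interrupts_enabled is cleared and IMR/IER are updated under
	 * irq_lock, synchronize_irq() waits out an in-flight handler,
	 * and gen9_reset_guc_interrupts() clears any stale IIR bits. */
	gen9_disable_guc_interrupts(dev_priv);

	/* ...so it is now safe to flush the log-flush worker. */
	flush_work(&dev_priv->guc.log.flush_work);
}
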
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
@@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
- if (pipe >= INTEL_INFO(dev)->num_pipes) {
+ if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- if (!crtc->hwmode.crtc_clock) {
+ if (!crtc->base.hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
@@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- &crtc->hwmode);
+ &crtc->base.hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@@ -990,76 +1046,60 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
- }
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
+
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
+ dev_priv->rps.ei = now;
return events;
}
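
The rewritten vlv_wa_c0_ei() collapses the old up/down helper pair into one comparison: scaled busy cycles against elapsed time times the percentage thresholds. A stand-alone arithmetic sketch (all numbers made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t elapsed = 1000;	/* time units since the last EI */
	uint64_t busy = 960;		/* render + media C0 units in that window */
	unsigned int up = 95, down = 85;	/* thresholds, in percent */

	busy *= 100;	/* scale busy counts into percent units */

	if (busy > elapsed * up)
		puts("-> GEN6_PM_RP_UP_THRESHOLD (96% busy > 95%)");
	else if (busy < elapsed * down)
		puts("-> GEN6_PM_RP_DOWN_THRESHOLD");
	else
		puts("-> no event, hold the current frequency");
	return 0;
}
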
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
@@ -1084,7 +1124,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1113,6 +1153,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+
+ if (new_delay >= dev_priv->rps.max_freq_softlimit)
+ adj = 0;
/*
* For better performance, jump directly
* to RPe if we're below it.
@@ -1134,6 +1177,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+
+ if (new_delay <= dev_priv->rps.min_freq_softlimit)
+ adj = 0;
} else { /* unknown event */
adj = 0;
}
@@ -1257,20 +1303,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[BCS]);
+ notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1323,11 +1369,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
- if (master_ctl & GEN8_GT_PM_IRQ) {
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & dev_priv->pm_rps_events) {
+ if (gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events)) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & dev_priv->pm_rps_events);
+ gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events));
ret = IRQ_HANDLED;
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
@@ -1340,25 +1388,28 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
- gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
- gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
- gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+
+ if (gt_iir[2] & dev_priv->pm_guc_events)
+ gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1491,41 +1542,68 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct drm_driver *driver = dev_priv->drm.driver;
+ uint32_t crcs[5];
int head, tail;
spin_lock(&pipe_crc->lock);
+ if (pipe_crc->source) {
+ if (!pipe_crc->entries) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_DEBUG_KMS("spurious interrupt\n");
+ return;
+ }
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
+ if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("CRC buffer overflowing\n");
+ return;
+ }
- entry = &pipe_crc->entries[head];
+ entry = &pipe_crc->entries[head];
- entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
- pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
+ entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
+ entry->crc[0] = crc0;
+ entry->crc[1] = crc1;
+ entry->crc[2] = crc2;
+ entry->crc[3] = crc3;
+ entry->crc[4] = crc4;
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
+ head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ pipe_crc->head = head;
- spin_unlock(&pipe_crc->lock);
+ spin_unlock(&pipe_crc->lock);
- wake_up_interruptible(&pipe_crc->wq);
+ wake_up_interruptible(&pipe_crc->wq);
+ } else {
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On CHV sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped == 0 ||
+ (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
+ spin_unlock(&pipe_crc->lock);
+ return;
+ }
+ spin_unlock(&pipe_crc->lock);
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_accurate_vblank_count(&crtc->base),
+ crcs);
+ }
}
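
The legacy branch above is a classic single-producer ring buffer; a stand-alone sketch of the same CIRC_SPACE() producer pattern (macros reproduced from linux/circ_buf.h):

#include <stdio.h>

#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), ((head) + 1), (size))

#define NR 8	/* power of two, like INTEL_PIPE_CRC_ENTRIES_NR */

int main(void)
{
	int head = 0, tail = 0, entries[NR];

	for (int i = 0; i < 10; i++) {
		if (CIRC_SPACE(head, tail, NR) < 1) {
			printf("CRC buffer overflowing at entry %d\n", i);
			break;
		}
		entries[head] = i;		/* produce one entry */
		head = (head + 1) & (NR - 1);	/* advance and wrap */
	}
	printf("head=%d tail=%d\n", head, tail);
	return 0;
}

One slot is always left free, so head == tail unambiguously means the buffer is empty.
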
#else
static inline void
@@ -1585,7 +1663,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&dev_priv->rps.work);
@@ -1598,13 +1676,48 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VECS]);
+ notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+{
+ if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
+ /* Sample the log-buffer-flush bits and clear them from the
+ * message identity register right away, to minimize the
+ * chance of losing a flush interrupt when flush interrupts
+ * arrive back to back.
+ * A new flush interrupt, for a different log buffer type
+ * (e.g. for ISR), can arrive while the host is handling one
+ * (for DPC). Since the same bit in the message register is
+ * used for both ISR and DPC, the GuC may set the bit for the
+ * 2nd interrupt just as the host clears it while handling
+ * the 1st.
+ */
+ u32 msg, flush;
+
+ msg = I915_READ(SOFT_SCRATCH(15));
+ flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
+ if (flush) {
+ /* Clear the message bits that are handled */
+ I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+ /* Handle flush interrupt in bottom half */
+ queue_work(dev_priv->guc.log.flush_wq,
+ &dev_priv->guc.log.flush_work);
+
+ dev_priv->guc.log.flush_interrupt_count++;
+ } else {
+ /* Leaving unhandled event bits uncleared will not
+ * re-trigger the interrupt.
+ */
+ }
+ }
+}
+
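
The handler's sample-then-selectively-clear sequence generalizes to any status register shared with other event sources; a stand-alone sketch of the pattern (the register is modeled as a plain variable):

#include <stdint.h>
#include <stdio.h>

#define EVT_FLUSH	(1u << 0)
#define EVT_CRASH	(1u << 1)
#define EVT_OTHER	(1u << 2)	/* someone else's event */

static uint32_t scratch;	/* stands in for SOFT_SCRATCH(15) */

static void handle_events(void)
{
	uint32_t msg = scratch;		/* sample once */
	uint32_t handled = msg & (EVT_FLUSH | EVT_CRASH);

	if (handled) {
		/* Clear only what we handled; EVT_OTHER survives, so a
		 * later reader (or re-raised interrupt) still sees it. */
		scratch = msg & ~handled;
		printf("handled 0x%x, remaining 0x%x\n", handled, scratch);
	}
}

int main(void)
{
	scratch = EVT_FLUSH | EVT_OTHER;
	handle_events();	/* handled 0x1, remaining 0x4 */
	return 0;
}
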
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1796,6 +1909,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* signalled in iir */
valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & (I915_LPE_PIPE_A_INTERRUPT |
+ I915_LPE_PIPE_B_INTERRUPT))
+ intel_lpe_audio_irq_handler(dev_priv);
+
/*
* VLV_IIR is single buffered, and reflects the level
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
@@ -1876,6 +1993,11 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* signalled in iir */
valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & (I915_LPE_PIPE_A_INTERRUPT |
+ I915_LPE_PIPE_B_INTERRUPT |
+ I915_LPE_PIPE_C_INTERRUPT))
+ intel_lpe_audio_irq_handler(dev_priv);
+
/*
* VLV_IIR is single buffered, and reflects the level
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
@@ -2338,7 +2460,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
found = true;
}
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
bxt_hpd_irq_handler(dev_priv, tmp_mask,
@@ -2354,7 +2476,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
}
- if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2407,7 +2529,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
fault_errors);
}
@@ -2551,92 +2673,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
-static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
+static inline void
+i915_err_print_instdone(struct drm_i915_private *dev_priv,
+ struct intel_instdone *instdone)
{
- uint32_t instdone[I915_NUM_INSTDONE_REG];
- u32 eir = I915_READ(EIR);
- int pipe, i;
+ int slice;
+ int subslice;
- if (!eir)
+ pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
return;
- pr_err("render error detected, EIR: 0x%08x\n", eir);
+ pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
- i915_get_extra_instdone(dev_priv, instdone);
+ if (INTEL_GEN(dev_priv) <= 6)
+ return;
- if (IS_G4X(dev_priv)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
- if (!IS_GEN2(dev_priv)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- pr_err("memory refresh error:\n");
- for_each_pipe(dev_priv, pipe)
- pr_err("pipe %c stat: 0x%08x\n",
- pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
- /* pipestat has already been acked */
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- pr_err("instruction error\n");
- pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_GEN(dev_priv) < 4) {
- u32 ipeir = I915_READ(IPEIR);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- POSTING_READ(IPEIR);
- } else {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- }
+static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
+
+ if (!IS_GEN2(dev_priv))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
- I915_WRITE(EIR, eir);
- POSTING_READ(EIR);
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+
+ I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
@@ -2646,12 +2728,13 @@ static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
* i915_handle_error - handle a gpu error
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
+ *
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
- * @fmt: Error message format string
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
@@ -2665,7 +2748,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
va_end(args);
i915_capture_error_state(dev_priv, engine_mask, error_msg);
- i915_report_and_clear_eir(dev_priv);
+ i915_clear_error_registers(dev_priv);
if (!engine_mask)
return;
@@ -2694,45 +2777,40 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2753,38 +2831,36 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS |
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2798,411 +2874,14 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
+static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
-
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
- for_each_engine(signaller, dev_priv) {
- if(engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
-
- for_each_engine(engine, dev_priv)
- engine->hangcheck.deadlock = 0;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- u32 instdone[I915_NUM_INSTDONE_REG];
- bool stuck;
- int i;
-
- if (engine->id != RCS)
- return true;
-
- i915_get_extra_instdone(engine->i915, instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = true;
- for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
-
- if (tmp != engine->hangcheck.instdone[i])
- stuck = false;
-
- engine->hangcheck.instdone[i] |= tmp;
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return HANGCHECK_ACTIVE;
- }
-
- if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
-
- return HANGCHECK_HUNG;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
- return ha;
-
- if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = I915_READ_CTL(engine);
- if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, 0,
- "Kicking stuck wait on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- }
-
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return HANGCHECK_HUNG;
- case 1:
- i915_handle_error(dev_priv, 0,
- "Kicking stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- case 0:
- return HANGCHECK_WAIT;
- }
- }
-
- return HANGCHECK_HUNG;
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
- struct intel_engine_cs *engine;
- unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
-
- if (!i915.enable_hangcheck)
- return;
-
- if (!READ_ONCE(dev_priv->gt.awake))
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
-
- semaphore_clear_deadlocks(dev_priv);
-
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = READ_ONCE(engine->last_submitted_seqno);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
- }
-
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
- busy_count += busy;
- }
-
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
-
- /* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
-}
-
-static void ibx_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
GEN5_IRQ_RESET(SDE);
- if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
}
@@ -3218,7 +2897,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -3226,12 +2905,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
GEN5_IRQ_RESET(GT);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
@@ -3263,6 +2940,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
+ u32 val;
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -3279,6 +2957,12 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
WARN_ON(dev_priv->irq_mask != ~0);
+ val = (I915_LPE_PIPE_A_INTERRUPT |
+ I915_LPE_PIPE_B_INTERRUPT |
+ I915_LPE_PIPE_C_INTERRUPT);
+
+ enable_mask |= val;
+
dev_priv->irq_mask = ~enable_mask;
GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
@@ -3293,12 +2977,12 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
GEN5_IRQ_RESET(DE);
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -3308,7 +2992,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3343,8 +3027,8 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
- if (HAS_PCH_SPLIT(dev))
- ibx_irq_reset(dev);
+ if (HAS_PCH_SPLIT(dev_priv))
+ ibx_irq_reset(dev_priv);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3438,19 +3122,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
/* Enable digital hotplug on the PCH */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
+ hotplug |= PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE |
+ PORTD_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
hotplug = I915_READ(PCH_PORT_HOTPLUG2);
@@ -3458,6 +3139,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ spt_hpd_detection_setup(dev_priv);
+}
+
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, hotplug, enabled_irqs;
@@ -3492,18 +3185,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 enabled_irqs)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
- hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
-
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
- PORTA_HOTPLUG_ENABLE;
+ hotplug |= PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE;
DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
hotplug, enabled_irqs);
@@ -3513,7 +3203,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
* For BXT invert bit has to be set based on AOB design
* for HPD detection logic, update it based on VBT fields.
*/
-
if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
hotplug |= BXT_DDIA_HPD_INVERT;
@@ -3527,21 +3216,44 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
+}
+
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+ hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+
+ bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+
+ __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
+}
+
static void ibx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
gen5_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+ HAS_PCH_LPT(dev_priv))
+ ; /* TODO: Enable HPD detection on older PCH platforms too */
+ else
+ spt_hpd_detection_setup(dev_priv);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
@@ -3552,14 +3264,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev)) {
+ if (HAS_L3_DPF(dev_priv)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
- gt_irqs |= GT_PARITY_ERROR(dev);
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
+ gt_irqs |= GT_PARITY_ERROR(dev_priv);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -3567,16 +3279,18 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev))
+ if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
- dev_priv->pm_irq_mask = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ dev_priv->pm_imr = 0xffffffff;
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3585,7 +3299,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
@@ -3616,7 +3330,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_postinstall(dev);
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
@@ -3696,14 +3410,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- dev_priv->pm_irq_mask = 0xffffffff;
+ dev_priv->pm_ier = 0x0;
+ dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
+ * is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3721,7 +3436,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
@@ -3732,7 +3447,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN8_PIPE_FIFO_UNDERRUN;
de_port_enables = de_port_masked;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
@@ -3750,19 +3465,22 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_hpd_detection_setup(dev_priv);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3808,7 +3526,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3971,7 +3689,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4020,7 +3738,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4054,7 +3772,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@@ -4168,7 +3886,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4222,7 +3940,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4400,9 +4118,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4487,10 +4205,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ if (HAS_GUC_SCHED(dev_priv))
+ dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4508,9 +4229,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 8)
dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
- INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
-
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
@@ -4531,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
+ /* Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block registers
+ * outside of the power domain. We defer setting up the display irqs
+ * in this case to the runtime pm.
+ */
+ dev_priv->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display_irqs_enabled = false;
+
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
@@ -4539,16 +4267,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
@@ -4557,13 +4285,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev))
+ if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
@@ -4577,21 +4305,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
}
}