-rw-r--r--  arch/x86/kernel/early-quirks.c                    |   1
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug                |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c               | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c                   |  46
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                   | 499
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                   |  28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c          |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c             |  31
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c                |   7
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c                |  33
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h                |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c                   |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                   |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h                 |  40
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h                 |  15
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                  |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c          |  85
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h          | 183
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c              | 203
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h              | 321
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c            |  21
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c            |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c                  |  61
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c            |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c                  |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c             |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h             | 106
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                   |   5
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c                  |  16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c           |   3
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c  | 315
-rw-r--r--  include/drm/i915_pciids.h                         |  32
33 files changed, 1330 insertions, 893 deletions
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 3cbb2c78a9df..bae0d32e327b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -528,6 +528,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_SKL_IDS(&gen9_early_ops),
INTEL_BXT_IDS(&gen9_early_ops),
INTEL_KBL_IDS(&gen9_early_ops),
+ INTEL_CFL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
};
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index fa36491495b1..108d21f34777 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -29,7 +29,6 @@ config DRM_I915_DEBUG
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
- select DRM_I915_TRACE_GEM
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -53,6 +52,7 @@ config DRM_I915_DEBUG_GEM
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
+ depends on DRM_I915_DEBUG_GEM
select TRACING
default n
help
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d8c6ec3cca71..e968aeae1d84 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -37,40 +37,21 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
return to_i915(node->minor->dev);
}
-static __always_inline void seq_print_param(struct seq_file *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
-}
-
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
-#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
+ intel_device_info_dump_flags(info, &p);
+ intel_device_info_dump_runtime(info, &p);
kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
- I915_PARAMS_FOR_EACH(PRINT_PARAM);
-#undef PRINT_PARAM
+ i915_params_dump(&i915_modparams, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -1601,20 +1582,23 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
- if (!HAS_FBC(dev_priv)) {
- seq_puts(m, "FBC unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
seq_puts(m, "FBC enabled\n");
else
- seq_printf(m, "FBC disabled: %s\n",
- dev_priv->fbc.no_fbc_reason);
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (fbc->work.scheduled)
+ seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
+ fbc->work.scheduled_vblank,
+ drm_crtc_vblank_count(&fbc->crtc->base));
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1634,7 +1618,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "Compressing: %s\n", yesno(mask));
}
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1681,10 +1665,8 @@ static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_IPS(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -1770,10 +1752,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- if (!HAS_LLC(dev_priv)) {
- seq_puts(m, "unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_LLC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -2253,8 +2233,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p;
- if (!HAS_HUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_HUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
@@ -2272,8 +2252,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct drm_printer p;
u32 tmp, i;
- if (!HAS_GUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
@@ -2346,29 +2326,16 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tTotal: %llu\n", tot);
}
-static bool check_guc_submission(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
-
- if (!guc->execbuf_client) {
- seq_printf(m, "GuC submission %s\n",
- HAS_GUC_SCHED(dev_priv) ?
- "disabled" :
- "not supported");
- return false;
- }
-
- return true;
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
+
+ GEM_BUG_ON(!guc->execbuf_client);
+ GEM_BUG_ON(!guc->preempt_client);
seq_printf(m, "Doorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
@@ -2395,8 +2362,8 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
unsigned int tmp;
int index;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
struct intel_engine_cs *engine;
@@ -2449,6 +2416,9 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (dump_load_err)
obj = dev_priv->guc.load_err_log;
else if (dev_priv->guc.log.vma)
@@ -2480,6 +2450,9 @@ static int i915_guc_log_control_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2493,6 +2466,9 @@ static int i915_guc_log_control_set(void *data, u64 val)
struct drm_i915_private *dev_priv = data;
int ret;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2543,10 +2519,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
- if (!HAS_PSR(dev_priv)) {
- seq_puts(m, "PSR not supported\n");
- return 0;
- }
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -2785,10 +2759,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_csr *csr;
- if (!HAS_CSR(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
csr = &dev_priv->csr;
@@ -3324,7 +3296,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
int plane;
if (INTEL_GEN(dev_priv) < 9)
- return 0;
+ return -ENODEV;
drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ca9f4b2862eb..6c8da9d20c33 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -931,8 +931,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_display_crc_init(dev_priv);
- intel_device_info_dump(dev_priv);
-
intel_detect_preproduction_hw(dev_priv);
return 0;
@@ -1084,7 +1082,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
+ intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
intel_sanitize_options(dev_priv);
@@ -1294,6 +1292,21 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_gem_shrinker_unregister(dev_priv);
}
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("i915 device info:");
+
+ intel_device_info_dump(&dev_priv->info, &p);
+ intel_device_info_dump_runtime(&dev_priv->info, &p);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @pdev: PCI device
@@ -1379,13 +1392,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_init_ipc(dev_priv);
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
-
intel_runtime_pm_put(dev_priv);
+ i915_welcome_messages(dev_priv);
+
return 0;
out_cleanup_hw:
@@ -1924,9 +1934,6 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
goto taint;
}
- i915_gem_reset(i915);
- intel_overlay_reset(i915);
-
/* Ok, now get things going again... */
/*
@@ -1939,6 +1946,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
goto error;
}
+ i915_gem_reset(i915);
+ intel_overlay_reset(i915);
+
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -2011,19 +2021,19 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR_OR_NULL(active_request)) {
+ /* Either the previous reset failed, or we pardon the reset. */
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
if (!(flags & I915_RESET_QUIET)) {
dev_notice(engine->i915->drm.dev,
"Resetting %s after gpu hang\n", engine->name);
}
error->reset_engine_count[engine->id]++;
- active_request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR(active_request)) {
- DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
- ret = PTR_ERR(active_request);
- goto out;
- }
-
if (!engine->i915->guc.execbuf_client)
ret = intel_gt_reset_engine(engine->i915, engine);
else
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1aba5657f5f0..19df7b7214ed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,12 +56,15 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#include "intel_uncore.h"
#include "intel_bios.h"
+#include "intel_device_info.h"
+#include "intel_display.h"
#include "intel_dpll_mgr.h"
-#include "intel_uc.h"
#include "intel_lrc.h"
+#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uncore.h"
+#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -243,174 +246,6 @@ static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
return clamp_u64_to_fixed16(interm_sum);
}
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static inline const char *onoff(bool v)
-{
- return v ? "on" : "off";
-}
-
-static inline const char *enableddisabled(bool v)
-{
- return v ? "enabled" : "disabled";
-}
-
-enum pipe {
- INVALID_PIPE = -1,
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- _PIPE_EDP,
- I915_MAX_PIPES = _PIPE_EDP
-};
-#define pipe_name(p) ((p) + 'A')
-
-enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
- I915_MAX_TRANSCODERS
-};
-
-static inline const char *transcoder_name(enum transcoder transcoder)
-{
- switch (transcoder) {
- case TRANSCODER_A:
- return "A";
- case TRANSCODER_B:
- return "B";
- case TRANSCODER_C:
- return "C";
- case TRANSCODER_EDP:
- return "EDP";
- case TRANSCODER_DSI_A:
- return "DSI A";
- case TRANSCODER_DSI_C:
- return "DSI C";
- default:
- return "<invalid>";
- }
-}
-
-static inline bool transcoder_is_dsi(enum transcoder transcoder)
-{
- return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
-}
-
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x-bdw.
- */
-enum i9xx_plane_id {
- PLANE_A,
- PLANE_B,
- PLANE_C,
-};
-#define plane_name(p) ((p) + 'A')
-
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_CURSOR,
- I915_MAX_PLANES,
-};
-
-#define for_each_plane_id_on_crtc(__crtc, __p) \
- for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
- for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
-
-enum port {
- PORT_NONE = -1,
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- I915_MAX_PORTS
-};
-#define port_name(p) ((p) + 'A')
-
-#define I915_NUM_PHYS_VLV 2
-
-enum dpio_channel {
- DPIO_CH0,
- DPIO_CH1
-};
-
-enum dpio_phy {
- DPIO_PHY0,
- DPIO_PHY1,
- DPIO_PHY2,
-};
-
-enum intel_display_power_domain {
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_GT_IRQ,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -472,121 +307,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
- for_each_if ((__mask) & (1 << (__p)))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-#define for_each_sprite(__dev_priv, __p, __s) \
- for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
- (__s)++)
-
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
-
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
-#define for_each_intel_plane(dev, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head)
-
-#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((plane_mask) & \
- (1 << drm_plane_index(&intel_plane->base)))
-
-#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head)
-
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head) \
- for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
-
-#define for_each_intel_encoder(dev, intel_encoder) \
- list_for_each_entry(intel_encoder, \
- &(dev)->mode_config.encoder_list, \
- base.head)
-
-#define for_each_intel_connector_iter(intel_connector, iter) \
- while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
-
-#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
- list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
- for_each_if ((intel_encoder)->base.crtc == (__crtc))
-
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
- list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
- for_each_if ((intel_connector)->base.encoder == (__encoder))
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if (BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_rev(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
- (__i)++) \
- for_each_if (plane)
-
-#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_crtc && \
- ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
- (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
- (__i)++) \
- for_each_if (crtc)
-
-#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
- (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
- (__i)++) \
- for_each_if (plane)
-
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -623,20 +343,6 @@ struct drm_i915_file_private {
atomic_t context_bans;
};
-/* Used by dp and fdi links */
-struct intel_link_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-void intel_link_compute_m_n(int bpp, int nlanes,
- int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool reduce_m_n);
-
/* Interface history:
*
* 1.1: Original.
@@ -651,27 +357,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- u32 swsci_gbda_sub_functions;
- u32 swsci_sbcb_sub_functions;
- struct opregion_asle *asle;
- void *rvda;
- void *vbt_firmware;
- const void *vbt;
- u32 vbt_size;
- u32 *lid_state;
- struct work_struct asle_work;
-};
-#define OPREGION_SIZE (8*1024)
-
struct intel_overlay;
struct intel_overlay_error_state;
@@ -764,137 +449,6 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func) \
- func(is_mobile); \
- func(is_lp); \
- func(is_alpha_support); \
- /* Keep has_* in alphabetical order */ \
- func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
- func(has_reset_engine); \
- func(has_fbc); \
- func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
- func(has_guc); \
- func(has_guc_ct); \
- func(has_hotplug); \
- func(has_l3_dpf); \
- func(has_llc); \
- func(has_logical_ring_contexts); \
- func(has_logical_ring_preemption); \
- func(has_overlay); \
- func(has_pooled_eu); \
- func(has_psr); \
- func(has_rc6); \
- func(has_rc6p); \
- func(has_resource_streamer); \
- func(has_runtime_pm); \
- func(has_snoop); \
- func(unfenced_needs_alignment); \
- func(cursor_needs_physical); \
- func(hws_needs_physical); \
- func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
-
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask;
- u8 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-};
-
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
-}
-
-/* Keep in gen based order, and chronological order within a gen */
-enum intel_platform {
- INTEL_PLATFORM_UNINITIALIZED = 0,
- INTEL_I830,
- INTEL_I845G,
- INTEL_I85X,
- INTEL_I865G,
- INTEL_I915G,
- INTEL_I915GM,
- INTEL_I945G,
- INTEL_I945GM,
- INTEL_G33,
- INTEL_PINEVIEW,
- INTEL_I965G,
- INTEL_I965GM,
- INTEL_G45,
- INTEL_GM45,
- INTEL_IRONLAKE,
- INTEL_SANDYBRIDGE,
- INTEL_IVYBRIDGE,
- INTEL_VALLEYVIEW,
- INTEL_HASWELL,
- INTEL_BROADWELL,
- INTEL_CHERRYVIEW,
- INTEL_SKYLAKE,
- INTEL_BROXTON,
- INTEL_KABYLAKE,
- INTEL_GEMINILAKE,
- INTEL_COFFEELAKE,
- INTEL_CANNONLAKE,
- INTEL_MAX_PLATFORMS
-};
-
-struct intel_device_info {
- u16 device_id;
- u16 gen_mask;
-
- u8 gen;
- u8 gt; /* GT number, 0 if undefined */
- u8 num_rings;
- u8 ring_mask; /* Rings supported by the HW */
-
- enum intel_platform platform;
- u32 platform_mask;
-
- u32 display_mmio_offset;
-
- u8 num_pipes;
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- unsigned int page_sizes; /* page sizes supported by the HW */
-
-#define DEFINE_FLAG(name) u8 name:1
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
-#undef DEFINE_FLAG
- u16 ddb_size; /* in blocks */
-
- /* Register offsets for the various display pipes and transcoders */
- int pipe_offsets[I915_MAX_TRANSCODERS];
- int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
- int cursor_offsets[I915_MAX_PIPES];
-
- /* Slice/subslice/EU info */
- struct sseu_dev_info sseu;
-
- u32 cs_timestamp_frequency_khz;
-
- struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
- } color;
-};
-
struct intel_display_error_state;
struct i915_gpu_state {
@@ -948,6 +502,7 @@ struct i915_gpu_state {
struct drm_i915_error_engine {
int engine_id;
/* Software tracked state */
+ bool idle;
bool waiting;
int num_waiters;
unsigned long hangcheck_timestamp;
@@ -2405,6 +1960,9 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset_wq;
+
/* Display functions */
struct drm_i915_display_funcs display;
@@ -4111,41 +3669,6 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
-
-/* intel_opregion.c */
-#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
-extern void intel_opregion_register(struct drm_i915_private *dev_priv);
-extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
-extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
- bool enable);
-extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
- pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-#else
-static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
-{
-}
-static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
-{
- return 0;
-}
-static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
-{
- return 0;
-}
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
-{
- return -ENODEV;
-}
-#endif
-
/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
@@ -4162,10 +3685,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)&dev_priv->info;
}
-const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump(struct drm_i915_private *dev_priv);
-
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a7f5579a7a5..ba9f67c256f4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2596,7 +2596,7 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
+ GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
return err;
}
@@ -3089,7 +3089,12 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- engine->irq_posted = 0;
+ /*
+ * Make sure this write is visible before we re-enable the interrupt
+ * handlers on another CPU, as tasklet_enable() resolves to just
+ * a compiler barrier which is insufficient for our purpose here.
+ */
+ smp_store_mb(engine->irq_posted, 0);
if (request)
request = i915_gem_reset_request(engine, request);
@@ -3119,6 +3124,25 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
+
+ /*
+	 * Ostensibly, we always want a context loaded for powersaving,
+ * so if the engine is idle after the reset, send a request
+ * to load our scratch kernel_context.
+ *
+ * More mysteriously, if we leave the engine idle after a reset,
+ * the next userspace batch may hang, with what appears to be
+ * an incoherent read by the CS (presumably stale TLB). An
+ * empty request appears sufficient to paper over the glitch.
+ */
+ if (list_empty(&engine->timeline->requests)) {
+ struct drm_i915_gem_request *rq;
+
+ rq = i915_gem_request_alloc(engine,
+ dev_priv->kernel_context);
+ if (!IS_ERR(rq))
+ __i915_add_request(rq, false);
+ }
}
i915_gem_restore_fences(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index ee83ec838ee7..a1d6956734f7 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
@@ -95,7 +96,8 @@ create_st:
struct page *page;
do {
- page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
+ order);
if (page)
break;
if (!order--)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba50aa613f1..944059322daa 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -416,6 +416,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
int n;
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+ err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -564,34 +565,17 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
-#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
-}
+ struct drm_printer p = i915_error_printer(m);
-static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
+ intel_device_info_dump_flags(info, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
- const struct i915_params *p)
+ const struct i915_params *params)
{
-#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
- I915_PARAMS_FOR_EACH(PRINT);
-#undef PRINT
+ struct drm_printer p = i915_error_printer(m);
+
+ i915_params_dump(params, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -1256,6 +1240,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hws = I915_READ(mmio);
}
+ ee->idle = intel_engine_is_idle(engine);
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 49a079494b68..79f8ec756362 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -96,6 +96,11 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
- if (static_cpu_has(X86_FEATURE_XMM4_1))
+ /*
+	 * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+	 * instructions. So don't enable movntdqa in hypervisor guests.
+ */
+ if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_enable(&has_movntdqa);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8dfea0320c2f..b5f3eb4fa8a3 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,8 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "i915_params.h"
#include "i915_drv.h"
@@ -172,3 +174,34 @@ i915_param_named(enable_dpcd_backlight, bool, 0600,
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+
+static __always_inline void _print_param(struct drm_printer *p,
+ const char *name,
+ const char *type,
+ const void *x)
+{
+ if (!__builtin_strcmp(type, "bool"))
+ drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ else if (!__builtin_strcmp(type, "int"))
+ drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "char *"))
+ drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
+ else
+ BUILD_BUG();
+}
+
+/**
+ * i915_params_dump - dump i915 modparams
+ * @params: i915 modparams
+ * @p: the &drm_printer
+ *
+ * Pretty printer for i915 modparams.
+ */
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
+ I915_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 792ce26d7449..c96360398072 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -28,6 +28,8 @@
#include <linux/bitops.h>
#include <linux/cache.h> /* for __read_mostly */
+struct drm_printer;
+
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
@@ -77,5 +79,7 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index fa67d3dde20e..36d48422b475 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -633,6 +633,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 09bf043c1c2e..41285bec8fc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3278,6 +3278,7 @@ enum i915_power_well_id {
# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
@@ -3858,6 +3859,9 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
@@ -3875,6 +3879,9 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS (1 << 20)
+
/*
* Display engine regs
*/
@@ -6329,6 +6336,7 @@ enum {
#define PLANE_CTL_TILED_X ( 1 << 10)
#define PLANE_CTL_TILED_Y ( 4 << 10)
#define PLANE_CTL_TILED_YF ( 5 << 10)
+#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
@@ -7552,6 +7560,7 @@ enum {
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
@@ -8142,6 +8151,7 @@ enum {
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define THROTTLE_12_5 (7<<2)
+#define DISABLE_EARLY_EOT (1<<1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 4e76768ffa95..e1169c02eb2b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -616,6 +616,7 @@ TRACE_EVENT(i915_gem_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -624,15 +625,16 @@ TRACE_EVENT(i915_gem_request_queue,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_gem_request,
@@ -641,23 +643,25 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ctx)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -683,15 +687,17 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global_seqno)
- __field(u32, ctx)
__field(u32, port)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
@@ -699,10 +705,10 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
__entry->port = port;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
- __entry->dev, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
+ __entry->dev, __entry->hw_id, __entry->ring,
+ __entry->ctx, __entry->seqno,
+ __entry->global_seqno, __entry->port)
);
DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
@@ -772,6 +778,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -787,6 +794,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
@@ -794,10 +802,10 @@ TRACE_EVENT(i915_gem_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global,
+ !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 8d07764887ec..51dbfe5bb418 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -140,4 +140,19 @@ static inline void drain_delayed_work(struct delayed_work *dw)
} while (delayed_work_pending(dw));
}
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static inline const char *onoff(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
#endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 369f780588fb..f51645a08dca 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2095,6 +2095,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
+ mutex_lock(&dev_priv->dpll_lock);
+
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2115,7 +2117,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
val = I915_READ(DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
- DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
@@ -2124,6 +2126,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
} else if (INTEL_INFO(dev_priv)->gen < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index f478be3ae0ba..d28592e43512 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -22,6 +22,9 @@
*
*/
+#include <drm/drm_print.h>
+
+#include "intel_device_info.h"
#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
@@ -67,21 +70,55 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump(struct drm_i915_private *dev_priv)
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p)
{
- const struct intel_device_info *info = &dev_priv->info;
-
- DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
- intel_platform_name(info->platform),
- info->gen,
- dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision);
-#define PRINT_FLAG(name) \
- DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
+static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
+ drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
+ drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+ drm_printf(p, "subslice mask %04x\n", sseu->subslice_mask);
+ drm_printf(p, "subslice per slice: %u\n",
+ hweight8(sseu->subslice_mask));
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ sseu_dump(&info->sseu, p);
+
+ drm_printf(p, "CS timestamp frequency: %u kHz\n",
+ info->cs_timestamp_frequency_khz);
+}
+
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
+
+ drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(info->platform),
+ info->gen);
+
+ intel_device_info_dump_flags(info, p);
+}
+
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -420,7 +457,10 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return 0;
}
-/*
+/**
+ * intel_device_info_runtime_init - initialize runtime info
+ * @info: intel device info struct
+ *
* Determine various intel_device_info fields at runtime.
*
* Use it when either:
@@ -433,9 +473,10 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+void intel_device_info_runtime_init(struct intel_device_info *info)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 10) {
@@ -543,22 +584,4 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
-
- DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
- DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
- DRM_DEBUG_DRIVER("subslice total: %u\n",
- sseu_subslice_total(&info->sseu));
- DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
- DRM_DEBUG_DRIVER("subslice per slice: %u\n",
- hweight8(info->sseu.subslice_mask));
- DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
- DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
- DRM_DEBUG_DRIVER("has slice power gating: %s\n",
- info->sseu.has_slice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
- info->sseu.has_subslice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has EU power gating: %s\n",
- info->sseu.has_eu_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("CS timestamp frequency: %u kHz\n",
- info->cs_timestamp_frequency_khz);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
new file mode 100644
index 000000000000..49cb27bd04c1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DEVICE_INFO_H_
+#define _INTEL_DEVICE_INFO_H_
+
+#include "intel_display.h"
+
+struct drm_printer;
+struct drm_i915_private;
+
+/* Keep in gen based order, and chronological order within a gen */
+enum intel_platform {
+ INTEL_PLATFORM_UNINITIALIZED = 0,
+ /* gen2 */
+ INTEL_I830,
+ INTEL_I845G,
+ INTEL_I85X,
+ INTEL_I865G,
+ /* gen3 */
+ INTEL_I915G,
+ INTEL_I915GM,
+ INTEL_I945G,
+ INTEL_I945GM,
+ INTEL_G33,
+ INTEL_PINEVIEW,
+ /* gen4 */
+ INTEL_I965G,
+ INTEL_I965GM,
+ INTEL_G45,
+ INTEL_GM45,
+ /* gen5 */
+ INTEL_IRONLAKE,
+ /* gen6 */
+ INTEL_SANDYBRIDGE,
+ /* gen7 */
+ INTEL_IVYBRIDGE,
+ INTEL_VALLEYVIEW,
+ INTEL_HASWELL,
+ /* gen8 */
+ INTEL_BROADWELL,
+ INTEL_CHERRYVIEW,
+ /* gen9 */
+ INTEL_SKYLAKE,
+ INTEL_BROXTON,
+ INTEL_KABYLAKE,
+ INTEL_GEMINILAKE,
+ INTEL_COFFEELAKE,
+ /* gen10 */
+ INTEL_CANNONLAKE,
+ INTEL_MAX_PLATFORMS
+};
+
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ func(is_mobile); \
+ func(is_lp); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_aliasing_ppgtt); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_reset_engine); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_full_ppgtt); \
+ func(has_full_48bit_ppgtt); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_guc_ct); \
+ func(has_hotplug); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_logical_ring_preemption); \
+ func(has_overlay); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(unfenced_needs_alignment); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_ipc);
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+};
+
+struct intel_device_info {
+ u16 device_id;
+ u16 gen_mask;
+
+ u8 gen;
+ u8 gt; /* GT number, 0 if undefined */
+ u8 num_rings;
+ u8 ring_mask; /* Rings supported by the HW */
+
+ enum intel_platform platform;
+ u32 platform_mask;
+
+ u32 display_mmio_offset;
+
+ u8 num_pipes;
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ u16 ddb_size; /* in blocks */
+
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+
+ u32 cs_timestamp_frequency_khz;
+
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
+const char *intel_platform_name(enum intel_platform platform);
+
+void intel_device_info_runtime_init(struct intel_device_info *info);
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index efa6c6d19664..0cd355978ab4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3073,6 +3073,12 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
if (!plane_state->base.visible)
return 0;
@@ -3453,9 +3459,9 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return 0;
}
-static u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
- switch (rotation) {
+ switch (rotate) {
case DRM_MODE_ROTATE_0:
break;
/*
@@ -3469,7 +3475,22 @@ static u32 skl_plane_ctl_rotation(unsigned int rotation)
case DRM_MODE_ROTATE_270:
return PLANE_CTL_ROTATE_90;
default:
- MISSING_CASE(rotation);
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
}
return 0;
@@ -3497,7 +3518,11 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ plane_ctl |= cnl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
@@ -9694,111 +9719,27 @@ err:
return ERR_PTR(ret);
}
-static u32
-intel_framebuffer_pitch_for_width(int width, int bpp)
-{
- u32 pitch = DIV_ROUND_UP(width * bpp, 8);
- return ALIGN(pitch, 64);
-}
-
-static u32
-intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
-{
- u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return PAGE_ALIGN(pitch * mode->vdisplay);
-}
-
-static struct drm_framebuffer *
-intel_framebuffer_create_for_mode(struct drm_device *dev,
- const struct drm_display_mode *mode,
- int depth, int bpp)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-
- obj = i915_gem_object_create(to_i915(dev),
- intel_framebuffer_size_for_mode(mode, bpp));
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- mode_cmd.width = mode->hdisplay;
- mode_cmd.height = mode->vdisplay;
- mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
- bpp);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
-
- return fb;
-}
-
-static struct drm_framebuffer *
-mode_fits_in_fbdev(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj;
- struct drm_framebuffer *fb;
-
- if (!dev_priv->fbdev)
- return NULL;
-
- if (!dev_priv->fbdev->fb)
- return NULL;
-
- obj = dev_priv->fbdev->fb->obj;
- BUG_ON(!obj);
-
- fb = &dev_priv->fbdev->fb->base;
- if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->format->cpp[0] * 8))
- return NULL;
-
- if (obj->base.size < mode->vdisplay * fb->pitches[0])
- return NULL;
-
- drm_framebuffer_get(fb);
- return fb;
-#else
- return NULL;
-#endif
-}
-
-static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_framebuffer *fb,
- int x, int y)
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
+ struct drm_plane *plane;
struct drm_plane_state *plane_state;
- int hdisplay, vdisplay;
- int ret;
-
- plane_state = drm_atomic_get_plane_state(state, crtc->primary);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- if (mode)
- drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
- else
- hdisplay = vdisplay = 0;
+ int ret, i;
- ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+ ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_w = hdisplay;
- plane_state->crtc_h = vdisplay;
- plane_state->src_x = x << 16;
- plane_state->src_y = y << 16;
- plane_state->src_w = hdisplay << 16;
- plane_state->src_h = vdisplay << 16;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->crtc != crtc)
+ continue;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
return 0;
}
@@ -9816,7 +9757,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
@@ -9884,10 +9824,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
found:
intel_crtc = to_intel_crtc(crtc);
- ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
- if (ret)
- goto fail;
-
state = drm_atomic_state_alloc(dev);
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
@@ -9919,39 +9855,17 @@ found:
if (!mode)
mode = &load_detect_mode;
- /* We need a framebuffer large enough to accommodate all accesses
- * that the plane may generate whilst we perform load detection.
- * We can not rely on the fbcon either being present (we get called
- * during its initialisation to detect all boot displays, or it may
- * not even exist) or that it is large enough to satisfy the
- * requested mode.
- */
- fb = mode_fits_in_fbdev(dev, mode);
- if (fb == NULL) {
- DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- } else
- DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- ret = PTR_ERR(fb);
- goto fail;
- }
-
- ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
- drm_framebuffer_put(fb);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = intel_modeset_disable_planes(state, crtc);
if (ret)
goto fail;
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
- if (!ret)
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
if (ret) {
DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
goto fail;
@@ -12569,11 +12483,15 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock)
+ if (nonblock && intel_state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->commit_work);
+ } else if (nonblock) {
queue_work(system_unbound_wq, &state->commit_work);
- else
+ } else {
+ if (intel_state->modeset)
+ flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
-
+ }
return 0;
}
@@ -13238,7 +13156,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs;
@@ -13300,7 +13218,12 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
@@ -14531,6 +14454,8 @@ int intel_modeset_init(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -15335,6 +15260,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev_priv);
+
+ destroy_workqueue(dev_priv->modeset_wq);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
new file mode 100644
index 000000000000..a0d2b6169361
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
+
+ I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1,
+ DPIO_PHY2,
+};
+
+#define I915_NUM_PHYS_VLV 2
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if((__mask) & BIT(__p))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((plane_mask) & \
+ BIT(drm_plane_index(&intel_plane->base)))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_rev(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 510e0bc3a377..9856e24c7c43 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1272,6 +1272,9 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WaDisableEarlyEOT:cnl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
return 0;
}
@@ -1757,6 +1760,24 @@ void intel_engine_dump(struct intel_engine_cs *engine,
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 8)
+ addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
+ RING_DMA_FADD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ else
+ addr = I915_READ(DMA_FADD_I8XX);
+ drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 4) {
+ drm_printf(m, "\tIPEIR: 0x%08x\n",
+ I915_READ(RING_IPEIR(engine->mmio_base)));
+ drm_printf(m, "\tIPEHR: 0x%08x\n",
+ I915_READ(RING_IPEHR(engine->mmio_base)));
+ } else {
+ drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ }
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 0acd9dd3ed5c..31f01d64c021 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -429,18 +429,18 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_hangcheck cur_state, *hc = &cur_state;
const bool busy = intel_engine_has_waiter(engine);
+ struct intel_engine_hangcheck hc;
semaphore_clear_deadlocks(dev_priv);
- hangcheck_load_sample(engine, hc);
- hangcheck_accumulate_sample(engine, hc);
- hangcheck_store_sample(engine, hc);
+ hangcheck_load_sample(engine, &hc);
+ hangcheck_accumulate_sample(engine, &hc);
+ hangcheck_store_sample(engine, &hc);
if (engine->hangcheck.stalled) {
hung |= intel_engine_flag(engine);
- if (hc->action != ENGINE_DEAD)
+ if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 49fdf09f9919..ef9f91a0b0c9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,22 +128,46 @@ intel_i2c_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GMBUS4, 0);
}
-static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev_priv))
- return;
-
val = I915_READ(DSPCLK_GATE_D);
- if (enable)
- val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ if (!enable)
+ val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
- val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}
+static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ if (!enable)
+ val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+}
+
+static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(GEN9_CLKGATE_DIS_4);
+ if (!enable)
+ val |= BXT_GMBUS_GATING_DIS;
+ else
+ val &= ~BXT_GMBUS_GATING_DIS;
+ I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+}
+
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
@@ -221,7 +245,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_i2c_reset(dev_priv);
- intel_i2c_quirk_set(dev_priv, true);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, false);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -238,7 +265,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
set_data(bus, 1);
set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, true);
}
static void
@@ -481,6 +510,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int i = 0, inc, try = 0;
int ret = 0;
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, false);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, false);
+
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -582,6 +618,13 @@ timeout:
ret = -EAGAIN;
out:
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, true);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, true);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 3bf65288ffff..5809b29044fc 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+ DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2e38fbfdf08f..dc8dadd75052 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -449,7 +449,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
engine->name, n,
- rq->ctx->hw_id, count,
+ port[n].context_id, count,
rq->global_seqno);
} else {
GEM_BUG_ON(!n);
@@ -504,7 +504,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
ce->ring->tail &= (ce->ring->size - 1);
ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", engine->name);
for (n = execlists_num_ports(&engine->execlists); --n; )
elsp_write(0, engine->execlists.elsp);
@@ -861,7 +861,7 @@ static void execlists_submission_tasklet(unsigned long data)
*/
status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
- GEM_TRACE("%s csb[%dd]: status=0x%08x:0x%08x\n",
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
engine->name, head,
status, buf[2*head + 1]);
@@ -905,8 +905,8 @@ static void execlists_submission_tasklet(unsigned long data)
rq = port_unpack(port, &count);
GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
engine->name,
- rq->ctx->hw_id, count,
- rq->global_seqno);
+ port->context_id, count,
+ rq ? rq->global_seqno : 0);
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
@@ -1555,6 +1555,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
struct intel_context *ce;
unsigned long flags;
+ GEM_TRACE("%s seqno=%x\n",
+ engine->name, request ? request->global_seqno : 0);
spin_lock_irqsave(&engine->timeline->lock, flags);
/*
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index fc65f5e451dd..c58e5f53bab0 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,6 +32,8 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
+#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
new file mode 100644
index 000000000000..e0e437ba9e51
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ u32 *lid_state;
+ struct work_struct asle_work;
+};
+
+#define OPREGION_SIZE (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+
+#else /* CONFIG_ACPI */
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+ return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 18779c6eb4bf..1db79a860b96 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8447,6 +8447,11 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
val |= SARBUNIT_CLKGATE_DIS;
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+
+ /* WaDisableVFclkgate:cnl */
+ val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val |= VFUNIT_CLKGATE_DIS;
+ I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a1ad85fa5c1a..2e32615eeada 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- i915_reg_t psr_ctl;
+ i915_reg_t psr_status;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_ctl = EDP_PSR2_CTL;
+ psr_status = EDP_PSR2_STATUS_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) &
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- psr_ctl, psr_status_mask, 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 96ab74f3d101..db9d57f39534 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1726,13 +1726,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
@@ -1792,6 +1792,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index e6b31041cc88..2ea69394f428 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1637,7 +1637,7 @@ static int igt_shrink_thp(void *arg)
* shmem to truncate our pages.
*/
i915_gem_shrink_all(i915);
- if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ if (i915_gem_object_has_pages(obj)) {
pr_err("shrink-all didn't truncate the pages\n");
err = -EINVAL;
goto out_close;
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index f98546b8a7fa..d1f91a533afa 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -132,6 +132,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@@ -140,6 +146,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
@@ -147,12 +159,24 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
*batch++ = lower_32_bits(vma->node.start);
}
@@ -234,6 +258,16 @@ static void hang_fini(struct hang *h)
i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
}
+static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
+}
+
static int igt_hang_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -296,6 +330,9 @@ static void global_reset_lock(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
wait_event(i915->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
@@ -353,54 +390,128 @@ static int igt_global_reset(void *arg)
return err;
}
-static int igt_reset_engine(void *arg)
+static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
- struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int reset_count, reset_engine_count;
+ struct hang h;
int err = 0;
- /* Check that we can issue a global GPU and engine reset */
+ /* Check that we can issue an engine reset on an idle engine (no-op) */
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ unsigned int reset_count, reset_engine_count;
+ IGT_TIMEOUT(end_time);
+
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
reset_count = i915_reset_count(&i915->gpu_error);
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
- err = i915_reset_engine(engine, I915_RESET_QUIET);
- if (err) {
- pr_err("i915_reset_engine failed\n");
- break;
- }
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ if (active) {
+ struct drm_i915_gem_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_gem_request_put(rq);
+ err = -EIO;
+ break;
+ }
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
- pr_err("Full GPU reset recorded! (engine reset expected)\n");
- err = -EINVAL;
- break;
- }
+ i915_gem_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
+ err = i915_reset_engine(engine, I915_RESET_QUIET);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ reset_engine_count += active;
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count) {
+ pr_err("%s engine reset %srecorded!\n",
+ engine->name, active ? "not " : "");
+ err = -EINVAL;
+ break;
+ }
+
+ engine->hangcheck.stalled = false;
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- if (i915_reset_engine_count(&i915->gpu_error, engine) ==
- reset_engine_count) {
- pr_err("No %s engine reset recorded!\n", engine->name);
- err = -EINVAL;
+ if (err)
break;
- }
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ cond_resched();
}
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine(void *arg)
+{
+ return __igt_reset_engine(arg, false);
+}
+
+static int igt_reset_active_engine(void *arg)
+{
+ return __igt_reset_engine(arg, true);
+}
+
static int active_engine(void *data)
{
struct intel_engine_cs *engine = data;
@@ -462,11 +573,12 @@ err_file:
return err;
}
-static int igt_reset_active_engines(void *arg)
+static int __igt_reset_engine_others(struct drm_i915_private *i915,
+ bool active)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine, *active;
+ struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
+ struct hang h;
int err = 0;
/* Check that issuing a reset on one engine does not interfere
@@ -476,24 +588,36 @@ static int igt_reset_active_engines(void *arg)
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- struct task_struct *threads[I915_NUM_ENGINES];
+ struct task_struct *threads[I915_NUM_ENGINES] = {};
unsigned long resets[I915_NUM_ENGINES];
unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long count = 0;
IGT_TIMEOUT(end_time);
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
memset(threads, 0, sizeof(threads));
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
struct task_struct *tsk;
- if (active == engine)
- continue;
-
resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
- active);
+ other);
- tsk = kthread_run(active_engine, active,
- "igt/%s", active->name);
+ if (other == engine)
+ continue;
+
+ tsk = kthread_run(active_engine, other,
+ "igt/%s", other->name);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
goto unwind;
@@ -503,20 +627,70 @@ static int igt_reset_active_engines(void *arg)
get_task_struct(tsk);
}
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
+ if (active) {
+ struct drm_i915_gem_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_gem_request_put(rq);
+ err = -EIO;
+ break;
+ }
+
+ i915_gem_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
err = i915_reset_engine(engine, I915_RESET_QUIET);
if (err) {
- pr_err("i915_reset_engine(%s) failed, err=%d\n",
- engine->name, err);
+ pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
+ engine->name, active ? "active" : "idle", err);
break;
}
+
+ engine->hangcheck.stalled = false;
+ count++;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("i915_reset_engine(%s:%s): %lu resets\n",
+ engine->name, active ? "active" : "idle", count);
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) -
+ resets[engine->id] != (active ? count : 0)) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+ engine->name, active ? "active" : "idle", count,
+ i915_reset_engine_count(&i915->gpu_error,
+ engine) - resets[engine->id]);
+ if (!err)
+ err = -EINVAL;
+ }
unwind:
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
int ret;
if (!threads[tmp])
@@ -524,27 +698,29 @@ unwind:
ret = kthread_stop(threads[tmp]);
if (ret) {
- pr_err("kthread for active engine %s failed, err=%d\n",
- active->name, ret);
+ pr_err("kthread for other engine %s failed, err=%d\n",
+ other->name, ret);
if (!err)
err = ret;
}
put_task_struct(threads[tmp]);
if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
- active)) {
+ other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
- active->name,
+ other->name,
i915_reset_engine_count(&i915->gpu_error,
- active) - resets[tmp]);
- err = -EIO;
+ other) - resets[tmp]);
+ if (!err)
+ err = -EINVAL;
}
}
if (global != i915_reset_count(&i915->gpu_error)) {
pr_err("Global reset (count=%ld)!\n",
i915_reset_count(&i915->gpu_error) - global);
- err = -EIO;
+ if (!err)
+ err = -EINVAL;
}
if (err)
@@ -556,9 +732,25 @@ unwind:
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, false);
+}
+
+static int igt_reset_active_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, true);
+}
+
static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
{
u32 reset_count;
@@ -574,16 +766,6 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
return reset_count;
}
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
-{
- return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int igt_wait_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -617,8 +799,8 @@ static int igt_wait_reset(void *arg)
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
@@ -712,8 +894,8 @@ static int igt_reset_queue(void *arg)
if (!wait_for_hang(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- prev->fence.seqno, hws_seqno(&h, prev));
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, prev->fence.seqno, hws_seqno(&h, prev));
intel_engine_dump(prev->engine, &p,
"%s\n", prev->engine->name);
@@ -819,8 +1001,8 @@ static int igt_handle_error(void *arg)
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
@@ -864,21 +1046,26 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_reset_engine),
- SUBTEST(igt_reset_active_engines),
+ SUBTEST(igt_reset_idle_engine),
+ SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_idle_engine_others),
+ SUBTEST(igt_reset_active_engine_others),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ bool saved_hangcheck;
int err;
if (!intel_has_gpu_reset(i915))
return 0;
intel_runtime_pm_get(i915);
+ saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
err = i915_subtests(tests, i915);
+ i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(i915);
return err;
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 972a25633525..5db0458dd832 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -373,24 +373,46 @@
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */
+ INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
#define INTEL_CFL_S_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */
+ INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
#define INTEL_CFL_H_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
-/* CFL U */
+/* CFL U GT1 */
+#define INTEL_CFL_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA1, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
+
+/* CFL U GT2 */
+#define INTEL_CFL_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA0, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info), \
+ INTEL_VGA_DEVICE(0x3EA9, info)
+
+/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */
+ INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
+
+#define INTEL_CFL_IDS(info) \
+ INTEL_CFL_S_GT1_IDS(info), \
+ INTEL_CFL_S_GT2_IDS(info), \
+ INTEL_CFL_H_GT2_IDS(info), \
+ INTEL_CFL_U_GT1_IDS(info), \
+ INTEL_CFL_U_GT2_IDS(info), \
+ INTEL_CFL_U_GT3_IDS(info)
/* CNL U 2+2 */
#define INTEL_CNL_U_GT2_IDS(info) \