diff options

 -rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c         |  8
 -rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c      |  4
 -rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h        |  2
 -rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  | 27
 -rw-r--r--  drivers/gpu/drm/i915/i915_scatterlist.c            |  8

5 files changed, 31 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 30595b2b63e1..d29005980806 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -936,8 +936,12 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
 	} else {
 		/* Use DSM base address instead for stolen memory */
 		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
-		if (WARN_ON(lmem_size < dsm_base))
-			return ERR_PTR(-ENODEV);
+		if (lmem_size < dsm_base) {
+			drm_dbg(&i915->drm,
+				"Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n",
+				&lmem_size, &dsm_base);
+			return NULL;
+		}
 		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index c1ce6258e55c..8d08b38874ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -71,6 +71,8 @@ static int fw_domains_show(struct seq_file *m, void *data)
 	struct intel_uncore_forcewake_domain *fw_domain;
 	unsigned int tmp;
 
+	spin_lock_irq(&uncore->lock);
+
 	seq_printf(m, "user.bypass_count = %u\n",
 		   uncore->user_forcewake_count);
 
@@ -79,6 +81,8 @@ static int fw_domains_show(struct seq_file *m, void *data)
 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
 			   READ_ONCE(fw_domain->wake_count));
 
+	spin_unlock_irq(&uncore->lock);
+
 	return 0;
 }
 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 14797e80bc92..263c9c3f6a03 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -295,7 +295,7 @@ struct guc_update_scheduling_policy_header {
 } __packed;
 
 /*
- * Can't dynmically allocate memory for the scheduling policy KLV because
+ * Can't dynamically allocate memory for the scheduling policy KLV because
  * it will be sent from within the reset path. Need a fixed size lump on
  * the stack instead :(.
  *
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0eaa1064242c..9400d0eb682b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -4267,20 +4267,25 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
 	u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
 
 	/* Short circuit function */
-	if (prio < I915_PRIORITY_NORMAL ||
-	    rq->guc_prio == GUC_PRIO_FINI ||
-	    (rq->guc_prio != GUC_PRIO_INIT &&
-	     !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
+	if (prio < I915_PRIORITY_NORMAL)
 		return;
 
 	spin_lock(&ce->guc_state.lock);
-	if (rq->guc_prio != GUC_PRIO_FINI) {
-		if (rq->guc_prio != GUC_PRIO_INIT)
-			sub_context_inflight_prio(ce, rq->guc_prio);
-		rq->guc_prio = new_guc_prio;
-		add_context_inflight_prio(ce, rq->guc_prio);
-		update_context_prio(ce);
-	}
+
+	if (rq->guc_prio == GUC_PRIO_FINI)
+		goto exit;
+
+	if (!new_guc_prio_higher(rq->guc_prio, new_guc_prio))
+		goto exit;
+
+	if (rq->guc_prio != GUC_PRIO_INIT)
+		sub_context_inflight_prio(ce, rq->guc_prio);
+
+	rq->guc_prio = new_guc_prio;
+	add_context_inflight_prio(ce, rq->guc_prio);
+	update_context_prio(ce);
+
+exit:
 	spin_unlock(&ce->guc_state.lock);
 }
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index e93d2538f298..4d830740946d 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -90,7 +90,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 
 	GEM_BUG_ON(!max_segment);
 
-	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
 
@@ -104,7 +104,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 	}
 
 	if (sg_alloc_table(st,
 			   DIV_ROUND_UP_ULL(node->size, segment_pages),
-			   GFP_KERNEL)) {
+			   GFP_KERNEL | __GFP_NOWARN)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -178,7 +178,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 	GEM_BUG_ON(list_empty(blocks));
 	GEM_BUG_ON(!max_segment);
 
-	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
 
@@ -190,7 +190,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 		return ERR_PTR(-E2BIG);
 	}
 
-	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
+	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL | __GFP_NOWARN)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}