-rw-r--r--   drivers/gpu/drm/xe/xe_bo_evict.c     | 45
-rw-r--r--   drivers/gpu/drm/xe/xe_bo_evict.h     |  1
-rw-r--r--   drivers/gpu/drm/xe/xe_device_types.h |  3
-rw-r--r--   drivers/gpu/drm/xe/xe_pci.c          |  2
-rw-r--r--   drivers/gpu/drm/xe/xe_pm.c           | 55
-rw-r--r--   drivers/gpu/drm/xe/xe_pm.h           |  2
6 files changed, 84 insertions, 24 deletions
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 2bf74eb7f281..748360fd2439 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -47,25 +47,17 @@ static int xe_bo_apply_to_pinned(struct xe_device *xe,
 }
 
 /**
- * xe_bo_evict_all - evict all BOs from VRAM
- *
+ * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
  * @xe: xe device
  *
- * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
- * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
- * All eviction magic done via TTM calls.
+ * Evict non-pinned user BOs (via GPU).
  *
  * Evict == move VRAM BOs to temporary (typically system) memory.
- *
- * This function should be called before the device goes into a suspend state
- * where the VRAM loses power.
  */
-int xe_bo_evict_all(struct xe_device *xe)
+int xe_bo_evict_all_user(struct xe_device *xe)
 {
 	struct ttm_device *bdev = &xe->ttm;
-	struct xe_tile *tile;
 	u32 mem_type;
-	u8 id;
 	int ret;
 
 	/* User memory */
@@ -91,9 +83,34 @@ int xe_bo_evict_all(struct xe_device *xe)
 		}
 	}
 
-	ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
-				    &xe->pinned.late.external,
-				    xe_bo_evict_pinned);
+	return 0;
+}
+
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All eviction magic done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+	struct xe_tile *tile;
+	u8 id;
+	int ret;
+
+	ret = xe_bo_evict_all_user(xe);
+	if (ret)
+		return ret;
+
+	ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+				    &xe->pinned.late.evicted, xe_bo_evict_pinned);
 
 	if (!ret)
 		ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
index d63eb3fc5cc9..e7f048634b32 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.h
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -9,6 +9,7 @@
 struct xe_device;
 
 int xe_bo_evict_all(struct xe_device *xe);
+int xe_bo_evict_all_user(struct xe_device *xe);
 int xe_bo_restore_early(struct xe_device *xe);
 int xe_bo_restore_late(struct xe_device *xe);
 
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index a42cb26e7d6d..3745389ead0d 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -522,6 +522,9 @@ struct xe_device {
 		struct mutex lock;
 	} d3cold;
 
+	/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
+	struct notifier_block pm_notifier;
+
 	/** @pmt: Support the PMT driver callback interface */
 	struct {
 		/** @pmt.lock: protect access for telemetry data */
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 07fe994f2a80..882398e09b7e 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -742,7 +742,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
 		return;
 
 	xe_device_remove(xe);
-	xe_pm_runtime_fini(xe);
+	xe_pm_fini(xe);
 }
 
 /*
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 4e112fbacada..d8a411d3ee96 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -286,6 +286,29 @@ static u32 vram_threshold_value(struct xe_device *xe)
 	return DEFAULT_VRAM_THRESHOLD;
 }
 
+static int xe_pm_notifier_callback(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
+	int err = 0;
+
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		xe_pm_runtime_get(xe);
+		err = xe_bo_evict_all_user(xe);
+		xe_pm_runtime_put(xe);
+		if (err)
+			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+		break;
+	}
+
+	if (err)
+		return NOTIFY_BAD;
+
+	return NOTIFY_DONE;
+}
+
 /**
  * xe_pm_init - Initialize Xe Power Management
  * @xe: xe device instance
@@ -299,6 +322,11 @@ int xe_pm_init(struct xe_device *xe)
 	u32 vram_threshold;
 	int err;
 
+	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
+	err = register_pm_notifier(&xe->pm_notifier);
+	if (err)
+		return err;
+
 	/* For now suspend/resume is only allowed with GuC */
 	if (!xe_device_uc_enabled(xe))
 		return 0;
@@ -308,24 +336,23 @@ int xe_pm_init(struct xe_device *xe)
 	if (xe->d3cold.capable) {
 		err = xe_device_sysfs_init(xe);
 		if (err)
-			return err;
+			goto err_unregister;
 
 		vram_threshold = vram_threshold_value(xe);
 		err = xe_pm_set_vram_threshold(xe, vram_threshold);
 		if (err)
-			return err;
+			goto err_unregister;
 	}
 
 	xe_pm_runtime_init(xe);
-
 	return 0;
+
+err_unregister:
+	unregister_pm_notifier(&xe->pm_notifier);
+	return err;
 }
 
-/**
- * xe_pm_runtime_fini - Finalize Runtime PM
- * @xe: xe device instance
- */
-void xe_pm_runtime_fini(struct xe_device *xe)
+static void xe_pm_runtime_fini(struct xe_device *xe)
 {
 	struct device *dev = xe->drm.dev;
 
@@ -333,6 +360,18 @@
 	pm_runtime_forbid(dev);
 }
 
+/**
+ * xe_pm_fini - Finalize PM
+ * @xe: xe device instance
+ */
+void xe_pm_fini(struct xe_device *xe)
+{
+	if (xe_device_uc_enabled(xe))
+		xe_pm_runtime_fini(xe);
+
+	unregister_pm_notifier(&xe->pm_notifier);
+}
+
 static void xe_pm_write_callback_task(struct xe_device *xe,
 				      struct task_struct *task)
 {
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 998d1ed64556..59678b310e55 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe);
 
 int xe_pm_init_early(struct xe_device *xe);
 int xe_pm_init(struct xe_device *xe);
-void xe_pm_runtime_fini(struct xe_device *xe);
+void xe_pm_fini(struct xe_device *xe);
 bool xe_pm_runtime_suspended(struct xe_device *xe);
 int xe_pm_runtime_suspend(struct xe_device *xe);
 int xe_pm_runtime_resume(struct xe_device *xe);
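
For reference, the suspend hook added above uses the kernel's generic PM notifier interface from <linux/suspend.h>: the driver embeds a struct notifier_block in its device structure, points notifier_call at a handler, registers it with register_pm_notifier(), and the handler then runs on PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE (before devices are suspended or the hibernation image is built), returning NOTIFY_DONE on success or NOTIFY_BAD to veto the transition. The sketch below is a minimal illustration of that pattern in isolation; it is not xe code, and the my_drv structure and my_drv_prepare() helper are made up for the example.

/* Minimal PM-notifier sketch; my_drv and my_drv_prepare() are hypothetical. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

struct my_drv {
	struct notifier_block pm_notifier;	/* embedded, like xe->pm_notifier */
};

static struct my_drv drv;

/* Stand-in for pre-suspend work (xe evicts non-pinned user BOs here). */
static int my_drv_prepare(struct my_drv *d)
{
	return 0;
}

static int my_drv_pm_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	struct my_drv *d = container_of(nb, struct my_drv, pm_notifier);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* Runs before the system suspends or builds the hibernation image. */
		if (my_drv_prepare(d))
			return NOTIFY_BAD;	/* veto the transition on error */
		break;
	}

	return NOTIFY_DONE;
}

static int __init my_drv_init(void)
{
	drv.pm_notifier.notifier_call = my_drv_pm_cb;
	return register_pm_notifier(&drv.pm_notifier);
}

static void __exit my_drv_exit(void)
{
	unregister_pm_notifier(&drv.pm_notifier);	/* mirror of xe_pm_fini() */
}

module_init(my_drv_init);
module_exit(my_drv_exit);
MODULE_LICENSE("GPL");

The patch follows the same shape: xe_pm_init() registers the notifier first and unwinds it via the new err_unregister label on failure, the callback evicts only non-pinned user BOs (xe_bo_evict_all_user()) under a runtime-PM reference, and device removal now calls xe_pm_fini() so the notifier is always unregistered.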