Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 64
1 file changed, 50 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 7f5bc73b2040..e16b4b3f8535 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -621,6 +621,8 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
+ a6xx_rpmh_stop(gmu);
+
err:
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
@@ -753,7 +755,6 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
- static bool rpmh_init;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int ret;
@@ -776,15 +777,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
- /* We only need to load the RPMh microcode once */
- if (!rpmh_init) {
- a6xx_gmu_rpmh_init(gmu);
- rpmh_init = true;
- } else {
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
- }
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
ret = a6xx_gmu_fw_load(gmu);
if (ret)
@@ -1482,6 +1477,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
pm_runtime_force_suspend(gmu->dev);
+ /*
+ * Since cxpd is a virtual device, the devlink with the GMU device will
+ * be removed automatically when we detach it.
+ */
+ dev_pm_domain_detach(gmu->cxpd, false);
+
if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
@@ -1504,6 +1505,17 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
gmu->initialized = false;
}
+static int cxpd_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
+
+ if (action == GENPD_NOTIFY_OFF)
+ complete_all(&gmu->pd_gate);
+
+ return 0;
+}
+
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -1608,8 +1620,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (adreno_is_a650_family(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
- if (IS_ERR(gmu->rscc))
+ if (IS_ERR(gmu->rscc)) {
+ ret = -ENODEV;
goto err_mmio;
+ }
} else {
gmu->rscc = gmu->mmio + 0x23000;
}
@@ -1618,8 +1632,26 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
- if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
+ ret = -ENODEV;
goto err_mmio;
+ }
+
+ gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
+ if (IS_ERR(gmu->cxpd)) {
+ ret = PTR_ERR(gmu->cxpd);
+ goto err_mmio;
+ }
+
+ if (!device_link_add(gmu->dev, gmu->cxpd,
+ DL_FLAG_PM_RUNTIME)) {
+ ret = -ENODEV;
+ goto detach_cxpd;
+ }
+
+ init_completion(&gmu->pd_gate);
+ complete_all(&gmu->pd_gate);
+ gmu->pd_nb.notifier_call = cxpd_notifier_cb;
/*
* Get a link to the GX power domain to reset the GPU in case of GMU
@@ -1633,10 +1665,16 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
+ /* Initialize RPMh */
+ a6xx_gmu_rpmh_init(gmu);
+
gmu->initialized = true;
return 0;
+detach_cxpd:
+ dev_pm_domain_detach(gmu->cxpd, false);
+
err_mmio:
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
@@ -1644,8 +1682,6 @@ err_mmio:
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
- ret = -ENODEV;
-
err_memory:
a6xx_gmu_memory_free(gmu);
err_put_device:
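
For context, the pd_gate completion and pd_nb notifier wired up in this patch are consumed from the GPU recovery path elsewhere in the driver, outside the hunks shown here. Below is a minimal sketch of that consumer pattern, assuming the standard genpd notifier API (dev_pm_genpd_add_notifier() / dev_pm_genpd_remove_notifier()); the helper name a6xx_gmu_wait_for_cx_collapse() and the 1-second timeout are hypothetical, not taken from this diff:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

/*
 * Sketch only: wait for the CX power domain to actually collapse after
 * dropping the GMU's runtime PM reference. Relies on the gmu->cxpd,
 * gmu->pd_nb, and gmu->pd_gate fields added by the patch above.
 */
static int a6xx_gmu_wait_for_cx_collapse(struct a6xx_gmu *gmu)
{
	int ret = 0;

	/* Arm the gate before dropping the runtime PM reference */
	dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
	reinit_completion(&gmu->pd_gate);

	pm_runtime_put_sync(gmu->dev);

	/* cxpd_notifier_cb() calls complete_all() on GENPD_NOTIFY_OFF */
	if (!wait_for_completion_timeout(&gmu->pd_gate,
					 msecs_to_jiffies(1000)))
		ret = -ETIMEDOUT;

	dev_pm_genpd_remove_notifier(gmu->cxpd);

	return ret;
}

Registering the notifier and reinitializing the completion before the pm_runtime_put_sync() call matters: it avoids missing a GENPD_NOTIFY_OFF event that fires as soon as the last reference is dropped.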