Diffstat (limited to 'drivers/gpu/drm/msm/adreno')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_debugfs.c   |   4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c       |  35
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c     |   2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c   |   7
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c       |  24
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h       |   1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c       | 137
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c |   4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c       |  34
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c  |  40
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c     |   6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h     |  22
12 files changed, 225 insertions, 91 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index fc2c905b6c9e..c9d11d57aed6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -117,13 +117,13 @@ reset_set(void *data, u64 val)
 
         if (a5xx_gpu->pm4_bo) {
                 msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
-                drm_gem_object_put_locked(a5xx_gpu->pm4_bo);
+                drm_gem_object_put(a5xx_gpu->pm4_bo);
                 a5xx_gpu->pm4_bo = NULL;
         }
 
         if (a5xx_gpu->pfp_bo) {
                 msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
-                drm_gem_object_put_locked(a5xx_gpu->pfp_bo);
+                drm_gem_object_put(a5xx_gpu->pfp_bo);
                 a5xx_gpu->pfp_bo = NULL;
         }
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7a271de9a212..5e2750eb3810 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+        if (a5xx_gpu->has_whereami) {
+                OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+                OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+                OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+        }
+}
+
 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                 bool sync)
 {
@@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
          * Most flush operations need to issue a WHERE_AM_I opcode to sync up
          * the rptr shadow
          */
-        if (a5xx_gpu->has_whereami && sync) {
-                OUT_PKT7(ring, CP_WHERE_AM_I, 2);
-                OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
-                OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
-        }
+        if (sync)
+                update_shadow_rptr(gpu, ring);
 
         spin_lock_irqsave(&ring->preempt_lock, flags);
 
@@ -168,6 +177,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                         ibs++;
                         break;
                 }
+
+                /*
+                 * Periodically update shadow-wptr if needed, so that we
+                 * can see partial progress of submits with large # of
+                 * cmds.. otherwise we could needlessly stall waiting for
+                 * ringbuffer state, simply due to looking at a shadow
+                 * rptr value that has not been updated
+                 */
+                if ((ibs % 32) == 0)
+                        update_shadow_rptr(gpu, ring);
         }
 
         /*
@@ -1415,7 +1434,7 @@ struct a5xx_gpu_state {
 static int a5xx_crashdumper_init(struct msm_gpu *gpu,
                 struct a5xx_crashdumper *dumper)
 {
-        dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+        dumper->ptr = msm_gem_kernel_new(gpu->dev,
                 SZ_1M, MSM_BO_WC, gpu->aspace,
                 &dumper->bo, &dumper->iova);
 
@@ -1517,7 +1536,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 
         if (a5xx_crashdumper_run(gpu, &dumper)) {
                 kfree(a5xx_state->hlsqregs);
-                msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+                msm_gem_kernel_put(dumper.bo, gpu->aspace);
                 return;
         }
 
@@ -1525,7 +1544,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
         memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
                 count * sizeof(u32));
 
-        msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+        msm_gem_kernel_put(dumper.bo, gpu->aspace);
 }
 
 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index cdb165236a88..0e63a1429189 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -362,7 +362,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
          */
         bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-        ptr = msm_gem_kernel_new_locked(drm, bosize,
+        ptr = msm_gem_kernel_new(drm, bosize,
                 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
                 &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
         if (IS_ERR(ptr))
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index ee72510ff8ce..8abc9a2b114a 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -240,7 +240,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
                 A5XX_PREEMPT_COUNTER_SIZE,
                 MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
         if (IS_ERR(counters)) {
-                msm_gem_kernel_put(bo, gpu->aspace, true);
+                msm_gem_kernel_put(bo, gpu->aspace);
                 return PTR_ERR(counters);
         }
 
@@ -272,9 +272,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
         int i;
 
         for (i = 0; i < gpu->nr_rings; i++) {
-                msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
-                msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
-                        gpu->aspace, true);
+                msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
+                msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
         }
 }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index b349692219b7..a7c58018959f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -519,9 +519,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
         if (!pdcptr)
                 goto err;
 
-        if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+        if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
                 pdc_in_aop = true;
-        else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+        else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
                 pdc_address_offset = 0x30090;
         else
                 pdc_address_offset = 0x30080;
@@ -933,6 +933,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 
         /* Use a known rate to bring up the GMU */
         clk_set_rate(gmu->core_clk, 200000000);
+        clk_set_rate(gmu->hub_clk, 150000000);
         ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
         if (ret) {
                 pm_runtime_put(gmu->gxpd);
@@ -1129,12 +1130,12 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
 {
-        msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
-        msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
-        msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
-        msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
-        msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
-        msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
+        msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
+        msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
+        msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
+        msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
+        msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
+        msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
 
         gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
         msm_gem_address_space_put(gmu->aspace);
@@ -1393,6 +1394,9 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
         gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
                 gmu->nr_clocks, "gmu");
 
+        gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+                gmu->nr_clocks, "hub");
+
         return 0;
 }
 
@@ -1504,7 +1508,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
          * are otherwise unused by a660.
          */
         gmu->dummy.size = SZ_4K;
-        if (adreno_is_a660(adreno_gpu)) {
+        if (adreno_is_a660_family(adreno_gpu)) {
                 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
                 if (ret)
                         goto err_memory;
@@ -1522,7 +1526,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
                         SZ_16M - SZ_16K, 0x04000);
                 if (ret)
                         goto err_memory;
-        } else if (adreno_is_a640(adreno_gpu)) {
+        } else if (adreno_is_a640_family(adreno_gpu)) {
                 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
                         SZ_256K - SZ_16K, 0x04000);
                 if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 71dfa60070cc..3c74f64e3126 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -66,6 +66,7 @@ struct a6xx_gmu {
         int nr_clocks;
         struct clk_bulk_data *clocks;
         struct clk *core_clk;
+        struct clk *hub_clk;
 
         /* current performance index set externally */
         int current_perf_index;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9c5e4618aa0a..40c9fef457a4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -52,21 +52,25 @@ static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
         return true;
 }
 
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
         struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-        uint32_t wptr;
-        unsigned long flags;
 
         /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
         if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
-                struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-
                 OUT_PKT7(ring, CP_WHERE_AM_I, 2);
                 OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
                 OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
         }
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+        uint32_t wptr;
+        unsigned long flags;
+
+        update_shadow_rptr(gpu, ring);
 
         spin_lock_irqsave(&ring->preempt_lock, flags);
 
@@ -145,7 +149,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
         struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
         struct msm_ringbuffer *ring = submit->ring;
-        unsigned int i;
+        unsigned int i, ibs = 0;
 
         a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
 
@@ -181,8 +185,19 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                         OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
                         OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
                         OUT_RING(ring, submit->cmd[i].size);
+                        ibs++;
                         break;
                 }
+
+                /*
+                 * Periodically update shadow-wptr if needed, so that we
+                 * can see partial progress of submits with large # of
+                 * cmds.. otherwise we could needlessly stall waiting for
+                 * ringbuffer state, simply due to looking at a shadow
+                 * rptr value that has not been updated
+                 */
+                if ((ibs % 32) == 0)
+                        update_shadow_rptr(gpu, ring);
         }
 
         get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -652,7 +667,7 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
                 regs = a650_protect;
                 count = ARRAY_SIZE(a650_protect);
                 count_max = 48;
-        } else if (adreno_is_a660(adreno_gpu)) {
+        } else if (adreno_is_a660_family(adreno_gpu)) {
                 regs = a660_protect;
                 count = ARRAY_SIZE(a660_protect);
                 count_max = 48;
@@ -683,7 +698,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
         if (adreno_is_a618(adreno_gpu))
                 return;
 
-        if (adreno_is_a640(adreno_gpu))
+        if (adreno_is_a640_family(adreno_gpu))
                 amsbc = 1;
 
         if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
@@ -694,6 +709,13 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
                 uavflagprd_inv = 2;
         }
 
+        if (adreno_is_7c3(adreno_gpu)) {
+                lower_bit = 1;
+                amsbc = 1;
+                rgb565_predicator = 1;
+                uavflagprd_inv = 2;
+        }
+
         gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
                 rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
         gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
@@ -740,6 +762,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 {
         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
         struct msm_gpu *gpu = &adreno_gpu->base;
+        const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
         u32 *buf = msm_gem_get_vaddr(obj);
         bool ret = false;
 
@@ -756,8 +779,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
          *
          * a660 targets have all the critical security fixes from the start
          */
-        if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
-                adreno_is_a640(adreno_gpu)) {
+        if (!strcmp(sqe_name, "a630_sqe.fw")) {
                 /*
                  * If the lowest nibble is 0xa that is an indication that this
                  * microcode has been patched. The actual version is in dword
@@ -778,7 +800,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
                 DRM_DEV_ERROR(&gpu->pdev->dev,
                         "a630 SQE ucode is too old. Have version %x need at least %x\n",
                         buf[0] & 0xfff, 0x190);
-        } else if (adreno_is_a650(adreno_gpu)) {
+        } else if (!strcmp(sqe_name, "a650_sqe.fw")) {
                 if ((buf[0] & 0xfff) >= 0x095) {
                         ret = true;
                         goto out;
@@ -787,7 +809,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
                 DRM_DEV_ERROR(&gpu->pdev->dev,
                         "a650 SQE ucode is too old. Have version %x need at least %x\n",
                         buf[0] & 0xfff, 0x095);
-        } else if (adreno_is_a660(adreno_gpu)) {
+        } else if (!strcmp(sqe_name, "a660_sqe.fw")) {
                 ret = true;
         } else {
                 DRM_DEV_ERROR(&gpu->pdev->dev,
@@ -897,7 +919,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
         a6xx_set_hwcg(gpu, true);
 
         /* VBIF/GBIF start*/
-        if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
+        if (adreno_is_a640_family(adreno_gpu) ||
+            adreno_is_a650_family(adreno_gpu)) {
                 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
                 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
                 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
@@ -935,13 +958,14 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
         gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
         gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
 
-        if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
+        if (adreno_is_a640_family(adreno_gpu) ||
+            adreno_is_a650_family(adreno_gpu))
                 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
         else
                 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
         gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
 
-        if (adreno_is_a660(adreno_gpu))
+        if (adreno_is_a660_family(adreno_gpu))
                 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
 
         /* Setting the mem pool size */
@@ -952,8 +976,10 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
          */
         if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
                 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
-        else if (adreno_is_a640(adreno_gpu))
+        else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
                 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
+        else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+                gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
         else
                 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
 
@@ -990,13 +1016,15 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
         /* Protect registers from the CP */
         a6xx_set_cp_protect(gpu);
 
-        if (adreno_is_a660(adreno_gpu)) {
+        if (adreno_is_a660_family(adreno_gpu)) {
                 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
                 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
-                /* Set dualQ + disable afull for A660 GPU but not for A635 */
-                gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
         }
 
+        /* Set dualQ + disable afull for A660 GPU */
+        if (adreno_is_a660(adreno_gpu))
+                gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
         /* Enable expanded apriv for targets that support it */
         if (gpu->hw_apriv) {
                 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
@@ -1035,7 +1063,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 
         if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
                 if (!a6xx_gpu->shadow_bo) {
-                        a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
+                        a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
                                 sizeof(u32) * gpu->nr_rings,
                                 MSM_BO_WC | MSM_BO_MAP_PRIV,
                                 gpu->aspace, &a6xx_gpu->shadow_bo,
@@ -1383,13 +1411,13 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
         struct msm_gpu *gpu = &adreno_gpu->base;
-        u32 cntl1_regval = 0;
+        u32 gpu_scid, cntl1_regval = 0;
 
         if (IS_ERR(a6xx_gpu->llc_mmio))
                 return;
 
         if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-                u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+                gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
                 gpu_scid &= 0x1f;
                 cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
@@ -1409,26 +1437,34 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
                 }
         }
 
-        if (cntl1_regval) {
+        if (!cntl1_regval)
+                return;
+
+        /*
+         * Program the slice IDs for the various GPU blocks and GPU MMU
+         * pagetables
+         */
+        if (!a6xx_gpu->have_mmu500) {
+                a6xx_llc_write(a6xx_gpu,
+                        REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
+
                 /*
-                 * Program the slice IDs for the various GPU blocks and GPU MMU
-                 * pagetables
+                 * Program cacheability overrides to not allocate cache
+                 * lines on a write miss
                  */
-                if (a6xx_gpu->have_mmu500)
-                        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
-                                cntl1_regval);
-                else {
-                        a6xx_llc_write(a6xx_gpu,
-                                REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
-
-                        /*
-                         * Program cacheability overrides to not allocate cache
-                         * lines on a write miss
-                         */
-                        a6xx_llc_rmw(a6xx_gpu,
-                                REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
-                }
+                a6xx_llc_rmw(a6xx_gpu,
+                        REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
+                return;
         }
+
+        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+
+        /* On A660, the SCID programming for UCHE traffic is done in
+         * A6XX_GBIF_SCACHE_CNTL0[14:10]
+         */
+        if (adreno_is_a660_family(adreno_gpu))
+                gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+                        (1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1477,7 +1513,7 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
         if (ret)
                 return ret;
 
-        msm_gpu_resume_devfreq(gpu);
+        msm_devfreq_resume(gpu);
 
         a6xx_llc_activate(a6xx_gpu);
 
@@ -1494,7 +1530,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
         a6xx_llc_deactivate(a6xx_gpu);
 
-        devfreq_suspend_device(gpu->devfreq.devfreq);
+        msm_devfreq_suspend(gpu);
 
         ret = a6xx_gmu_stop(a6xx_gpu);
         if (ret)
@@ -1667,11 +1703,11 @@ static u32 a618_get_speed_bin(u32 fuse)
         return UINT_MAX;
 }
 
-static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
 {
         u32 val = UINT_MAX;
 
-        if (revn == 618)
+        if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
                 val = a618_get_speed_bin(fuse);
 
         if (val == UINT_MAX) {
@@ -1684,14 +1720,13 @@ static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
         return (1 << val);
 }
 
-static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
-                u32 revn)
+static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
 {
         u32 supp_hw = UINT_MAX;
-        u16 speedbin;
+        u32 speedbin;
         int ret;
 
-        ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
+        ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin);
         /*
          * -ENOENT means that the platform doesn't support speedbin which is
          * fine
@@ -1704,9 +1739,8 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
                         ret);
                 goto done;
         }
-        speedbin = le16_to_cpu(speedbin);
 
-        supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
+        supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
 
 done:
         ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
@@ -1772,12 +1806,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
          */
         info = adreno_info(config->rev);
 
-        if (info && (info->revn == 650 || info->revn == 660))
+        if (info && (info->revn == 650 || info->revn == 660 ||
+                        adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
                 adreno_gpu->base.hw_apriv = true;
 
         a6xx_llc_slices_init(pdev, a6xx_gpu);
 
-        ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+        ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
         if (ret) {
                 a6xx_destroy(&(a6xx_gpu->base.base));
                 return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index ad4ea0ed5d99..e8f65cd8eca6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -112,7 +112,7 @@ static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
 static int a6xx_crashdumper_init(struct msm_gpu *gpu,
                 struct a6xx_crashdumper *dumper)
 {
-        dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+        dumper->ptr = msm_gem_kernel_new(gpu->dev,
                 SZ_1M, MSM_BO_WC, gpu->aspace,
                 &dumper->bo, &dumper->iova);
 
@@ -961,7 +961,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
                 a6xx_get_clusters(gpu, a6xx_state, dumper);
                 a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
 
-                msm_gem_kernel_put(dumper->bo, gpu->aspace, true);
+                msm_gem_kernel_put(dumper->bo, gpu->aspace);
         }
 
         if (snapshot_debugbus)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 919433732b43..d4c65bf0a1b7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -382,6 +382,36 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
         msg->cnoc_cmds_data[1][0] = 0x60000001;
 }
 
+static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+        /*
+         * Send a single "off" entry just to get things running
+         * TODO: bus scaling
+         */
+        msg->bw_level_num = 1;
+
+        msg->ddr_cmds_num = 3;
+        msg->ddr_wait_bitmask = 0x07;
+
+        msg->ddr_cmds_addrs[0] = 0x50004;
+        msg->ddr_cmds_addrs[1] = 0x50000;
+        msg->ddr_cmds_addrs[2] = 0x50088;
+
+        msg->ddr_cmds_data[0][0] = 0x40000000;
+        msg->ddr_cmds_data[0][1] = 0x40000000;
+        msg->ddr_cmds_data[0][2] = 0x40000000;
+
+        /*
+         * These are the CX (CNOC) votes - these are used by the GMU but the
+         * votes are known and fixed for the target
+         */
+        msg->cnoc_cmds_num = 1;
+        msg->cnoc_wait_bitmask = 0x01;
+
+        msg->cnoc_cmds_addrs[0] = 0x5006c;
+        msg->cnoc_cmds_data[0][0] = 0x40000000;
+        msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
 static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
         /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -428,10 +458,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
 
         if (adreno_is_a618(adreno_gpu))
                 a618_build_bw_table(&msg);
-        else if (adreno_is_a640(adreno_gpu))
+        else if (adreno_is_a640_family(adreno_gpu))
                 a640_build_bw_table(&msg);
         else if (adreno_is_a650(adreno_gpu))
                 a650_build_bw_table(&msg);
+        else if (adreno_is_7c3(adreno_gpu))
+                adreno_7c3_build_bw_table(&msg);
         else if (adreno_is_a660(adreno_gpu))
                 a660_build_bw_table(&msg);
         else
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 6dad8015c9a1..2a6ce76656aa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -8,8 +8,6 @@
 
 #include "adreno_gpu.h"
 
-#define ANY_ID 0xff
-
 bool hang_debug = false;
 MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
 module_param_named(hang_debug, hang_debug, bool, 0600);
@@ -300,6 +298,30 @@ static const struct adreno_info gpulist[] = {
                 .init = a6xx_gpu_init,
                 .zapfw = "a660_zap.mdt",
                 .hwcg = a660_hwcg,
+        }, {
+                .rev = ADRENO_REV(6, 3, 5, ANY_ID),
+                .name = "Adreno 7c Gen 3",
+                .fw = {
+                        [ADRENO_FW_SQE] = "a660_sqe.fw",
+                        [ADRENO_FW_GMU] = "a660_gmu.bin",
+                },
+                .gmem = SZ_512K,
+                .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+                .init = a6xx_gpu_init,
+                .hwcg = a660_hwcg,
+        }, {
+                .rev = ADRENO_REV(6, 8, 0, ANY_ID),
+                .revn = 680,
+                .name = "A680",
+                .fw = {
+                        [ADRENO_FW_SQE] = "a630_sqe.fw",
+                        [ADRENO_FW_GMU] = "a640_gmu.bin",
+                },
+                .gmem = SZ_2M,
+                .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+                .init = a6xx_gpu_init,
+                .zapfw = "a640_zap.mdt",
+                .hwcg = a640_hwcg,
         },
 };
 
@@ -325,6 +347,15 @@ static inline bool _rev_match(uint8_t entry, uint8_t id)
         return (entry == ANY_ID) || (entry == id);
 }
 
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
+{
+
+        return _rev_match(rev1.core, rev2.core) &&
+                _rev_match(rev1.major, rev2.major) &&
+                _rev_match(rev1.minor, rev2.minor) &&
+                _rev_match(rev1.patchid, rev2.patchid);
+}
+
 const struct adreno_info *adreno_info(struct adreno_rev rev)
 {
         int i;
@@ -332,10 +363,7 @@ const struct adreno_info *adreno_info(struct adreno_rev rev)
         /* identify gpu: */
         for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
                 const struct adreno_info *info = &gpulist[i];
-                if (_rev_match(info->rev.core, rev.core) &&
-                        _rev_match(info->rev.major, rev.major) &&
-                        _rev_match(info->rev.minor, rev.minor) &&
-                        _rev_match(info->rev.patchid, rev.patchid))
+                if (adreno_cmp_rev(info->rev, rev))
                         return info;
         }
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9f5a30234b33..748665232d29 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -261,8 +261,8 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                         return ret;
                 }
                 return -EINVAL;
-        case MSM_PARAM_NR_RINGS:
-                *value = gpu->nr_rings;
+        case MSM_PARAM_PRIORITIES:
+                *value = gpu->nr_rings * NR_SCHED_PRIORITIES;
                 return 0;
         case MSM_PARAM_PP_PGTABLE:
                 *value = 0;
@@ -390,7 +390,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
         struct drm_gem_object *bo;
         void *ptr;
 
-        ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+        ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
                 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 
         if (IS_ERR(ptr))
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8dbe0d157520..225c277a6223 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -42,6 +42,8 @@ struct adreno_rev {
         uint8_t  patchid;
 };
 
+#define ANY_ID 0xff
+
 #define ADRENO_REV(core, major, minor, patchid) \
         ((struct adreno_rev){ core, major, minor, patchid })
 
@@ -141,6 +143,8 @@ struct adreno_platform_config {
         __ret;                                             \
 })
 
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
+
 static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
 {
         return (gpu->revn < 300);
@@ -237,9 +241,9 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
         return gpu->revn == 630;
 }
 
-static inline int adreno_is_a640(struct adreno_gpu *gpu)
+static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
 {
-        return gpu->revn == 640;
+        return (gpu->revn == 640) || (gpu->revn == 680);
 }
 
 static inline int adreno_is_a650(struct adreno_gpu *gpu)
@@ -247,15 +251,27 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
         return gpu->revn == 650;
 }
 
+static inline int adreno_is_7c3(struct adreno_gpu *gpu)
+{
+        /* The order of args is important here to handle ANY_ID correctly */
+        return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+}
+
 static inline int adreno_is_a660(struct adreno_gpu *gpu)
 {
         return gpu->revn == 660;
 }
 
+static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
+{
+        return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
+}
+
 /* check for a650, a660, or any derivatives */
 static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
 {
-        return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
+        return gpu->revn == 650 || gpu->revn == 620 ||
+                adreno_is_a660_family(gpu);
 }
 
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);