-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c                  | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c              |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c                 |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                  | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c                 | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c                   |  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c                   | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c   | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c                |  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c                | 23
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  | 12
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h          |  1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c               | 24
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c    |  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c              | 81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c                | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.h                |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c                   | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c                   |  2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c                   |  4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c             |  8
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h             |  1
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c                              |  3
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_sr.c                          | 31
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_sr_types.h                    |  6
25 files changed, 200 insertions(+), 125 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d891ab779ca7..5df21529b3b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
+ /* Make sure VRAM is allocated contiguously */
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
- for (i = 0; i < (*bo)->placement.num_placement; i++)
- (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
- if (r)
- return r;
+ if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
+ !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
+
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ for (i = 0; i < (*bo)->placement.num_placement; i++)
+ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
+ return r;
+ }
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
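
The hunk above makes the contiguous-VRAM fixup conditional: the revalidation through ttm_bo_validate() only runs when the BO currently sits in VRAM without TTM_PL_FLAG_CONTIGUOUS set, instead of unconditionally on every lookup. A minimal sketch of the pattern, factored into a hypothetical helper (all field and function names taken from the hunk itself):

    /* Sketch: only revalidate when the current placement is wrong. */
    static int ensure_contiguous_vram(struct amdgpu_bo *bo,
                                      struct ttm_operation_ctx *ctx)
    {
            unsigned int i;

            if (bo->tbo.resource->mem_type != TTM_PL_VRAM ||
                (bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS))
                    return 0; /* already contiguous, or not in VRAM */

            amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
            for (i = 0; i < bo->placement.num_placement; i++)
                    bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;

            return ttm_bo_validate(&bo->tbo, &bo->placement, ctx);
    }
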
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 96316111300a..d272d95dd5b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -145,7 +145,7 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
-#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM, 0)
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
/*
* Default init level where all blocks are expected to be initialized. This is
* the level of initialization expected by default and also after a full reset
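
The one-line amdgpu_device.c fix addresses an off-by-one: GENMASK(h, l) sets bits l through h inclusive, so a mask covering N IP blocks must use N - 1 as the high bit. A standalone illustration (hypothetical macro, assuming only the <linux/bits.h> semantics):

    #include <linux/bits.h>

    /*
     * GENMASK(3, 0) == 0x0f  -> four bits for four blocks
     * GENMASK(4, 0) == 0x1f  -> five bits: one too many
     */
    #define IP_BLK_MASK_ALL(n)      GENMASK((n) - 1, 0)
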
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 31fd30dcd593..65bb26215e86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ if (abo->placements[i].mem_type == TTM_PL_VRAM)
+ abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8d9bf7a0857f..ddd7f05e4db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -674,12 +674,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- if (adev->gfx.enable_cleaner_shader &&
- ring->funcs->emit_cleaner_shader &&
- job->enforce_isolation)
- ring->funcs->emit_cleaner_shader(ring);
-
- if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
+ if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
+ !(job->enforce_isolation && !job->vmid))
return 0;
amdgpu_ring_ib_begin(ring);
@@ -690,6 +686,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
+ if (adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader &&
+ job->enforce_isolation)
+ ring->funcs->emit_cleaner_shader(ring);
+
if (vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index e2b3dda57030..54459254bd37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -574,8 +576,12 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
{
int err;
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_mec.bin", chip_name);
+ if (amdgpu_sriov_vf(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 079131aeb2f7..3c8ab8698af8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1288,7 +1288,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
unsigned i;
/* No patching necessary for the first instance */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 7b826a136ceb..e5324c5bc6c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1423,6 +1423,7 @@ err:
static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ bool cache_line_size_missing,
struct kfd_gpu_cache_info *pcache_info)
{
struct amdgpu_device *adev = kdev->adev;
@@ -1437,6 +1438,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* Scalar L1 Instruction Cache per SQC */
@@ -1449,6 +1452,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* Scalar L1 Data Cache per SQC */
@@ -1460,6 +1465,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 64;
i++;
}
/* GL1 Data Cache per SA */
@@ -1472,7 +1479,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
- pcache_info[i].cache_line_size = 0;
+ if (cache_line_size_missing)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* L2 Data Cache per GPU (Total Tex Cache) */
@@ -1484,6 +1492,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* L3 Data Cache per GPU */
@@ -1494,7 +1504,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
- pcache_info[i].cache_line_size = 0;
+ pcache_info[i].cache_line_size = 64;
i++;
}
return i;
@@ -1569,6 +1579,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
+ bool cache_line_size_missing = false;
switch (kdev->adev->asic_type) {
case CHIP_KAVERI:
@@ -1692,10 +1703,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ /* Cache line size is not available in IP discovery for gc11;
+ * have kfd_fill_gpu_cache_info_from_gfx_config() hard-code it.
+ */
+ cache_line_size_missing = true;
+ fallthrough;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
num_of_cache_types =
- kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
+ kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,
+ cache_line_size_missing,
+ *pcache_info);
break;
default:
*pcache_info = dummy_cache_info;
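
The kfd_crat.c hunks above all apply the same rule: keep the cache line size reported by IP discovery when it is non-zero, and substitute a fixed default only on the gc11 parts where discovery omits it. The rule as a standalone helper (hypothetical name, with the per-level defaults as chosen in the hunks):

    /* Sketch: prefer the discovered value, fall back only when absent. */
    static u32 cache_line_size_or(u32 discovered, u32 fallback,
                                  bool discovery_missing)
    {
            if (discovery_missing && !discovered)
                    return fallback;
            return discovered;
    }
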
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c79fe9069e22..16b5daaa272f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -207,6 +207,21 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
queue_input.process_id = qpd->pqm->process->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
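
This hunk, together with the kfd_process.c hunks below, converts the MES process-context BO from eager allocation at process-device creation to lazy allocation on the first add_queue_mes() call, so processes that never create a MES queue allocate nothing. Reduced to its essentials, the pattern pairs allocate-on-first-use with a guarded free (a generic sketch, not the driver's GTT helpers):

    #include <linux/slab.h>

    struct ctx { void *ptr; };

    static int ctx_get(struct ctx *c)   /* allocate only when first needed */
    {
            if (!c->ptr) {
                    c->ptr = kzalloc(4096, GFP_KERNEL);
                    if (!c->ptr)
                            return -ENOMEM;
            }
            return 0;
    }

    static void ctx_put(struct ctx *c)  /* safe if never allocated */
    {
            kfree(c->ptr);              /* kfree(NULL) is a no-op */
            c->ptr = NULL;
    }
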
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index eacfeb32f35d..4b275937d05e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -306,7 +306,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, src[i]);
if (r) {
dev_err(dev, "%s: fail %d dma_map_page\n",
@@ -629,7 +629,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
goto out_oom;
}
- dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, dst[i]);
if (r) {
dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
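
Both kfd_migrate.c mapping sites now use DMA_BIDIRECTIONAL, presumably because pages involved in SVM migration can be touched by device copies in either direction, and a bidirectional mapping keeps cache maintenance correct whichever way the data moves. A usage sketch of the API involved (assumed context: dev and page in scope):

    #include <linux/dma-mapping.h>

    dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
                                   DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, addr))
            return -EFAULT;
    /* ... device copies in either direction ... */
    dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
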
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 87cd52cf4ee9..d0ee173acf82 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1076,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfd_free_process_doorbells(pdd->dev->kfd, pdd);
- if (pdd->dev->kfd->shared_resources.enable_mes)
+ if (pdd->dev->kfd->shared_resources.enable_mes &&
+ pdd->proc_ctx_cpu_ptr)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
&pdd->proc_ctx_bo);
/*
@@ -1608,7 +1609,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
- int retval = 0;
if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL;
@@ -1632,21 +1632,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
- if (dev->kfd->shared_resources.enable_mes) {
- retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
- if (retval) {
- dev_err(dev->adev->dev,
- "failed to allocate process context bo\n");
- goto err_free_pdd;
- }
- memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
- }
-
p->pdds[p->n_pdds++] = pdd;
if (kfd_dbg_is_per_vmid_supported(pdd->dev))
pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
@@ -1658,10 +1643,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
idr_init(&pdd->alloc_idr);
return pdd;
-
-err_free_pdd:
- kfree(pdd);
- return NULL;
}
/**
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c76db22a1000..59b92d66e958 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -212,13 +212,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
- struct kfd_process_device *pdd;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q) {
- pdd = kfd_get_process_device_data(pqn->q->device, pqm->process);
- kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
- kfd_queue_release_buffers(pdd, &pqn->q->properties);
+ struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
+ pqm->process);
+ if (pdd) {
+ kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+ kfd_queue_release_buffers(pdd, &pqn->q->properties);
+ } else {
+ WARN_ON(!pdd);
+ }
pqm_clean_queue_resource(pqm, pqn);
}
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 67a5de573943..d7acdd42d80f 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -164,6 +164,7 @@ enum amd_pp_task {
};
enum PP_SMC_POWER_PROFILE {
+ PP_SMC_POWER_PROFILE_UNKNOWN = -1,
PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
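
Adding a -1 sentinel lets "profile never set" be distinguished from every valid selection, since PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT already occupies 0x0. A sketch of the intended use, mirroring smu_init_power_profile() further down (pick_default_profile() is a hypothetical stand-in):

    enum PP_SMC_POWER_PROFILE mode = PP_SMC_POWER_PROFILE_UNKNOWN;

    /* later, once the workload map is known */
    if (mode == PP_SMC_POWER_PROFILE_UNKNOWN)
            mode = pick_default_profile();
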
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 5eae14fe79f1..21bd635bcdfc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -764,6 +764,7 @@ static int smu_early_init(struct amdgpu_ip_block *ip_block)
smu->smu_baco.platform_support = false;
smu->smu_baco.maco_support = false;
smu->user_dpm_profile.fan_mode = -1;
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
mutex_init(&smu->message_lock);
@@ -1248,6 +1249,21 @@ static bool smu_is_workload_profile_available(struct smu_context *smu,
return smu->workload_map && smu->workload_map[profile].valid_mapping;
}
+static void smu_init_power_profile(struct smu_context *smu)
+{
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
+ if (smu->is_apu ||
+ !smu_is_workload_profile_available(
+ smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ else
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ }
+ smu_power_profile_mode_get(smu, smu->power_profile_mode);
+}
+
static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1269,13 +1285,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- if (smu->is_apu ||
- !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- else
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- smu_power_profile_mode_get(smu, smu->power_profile_mode);
-
+ smu_init_power_profile(smu);
smu->display_config = &adev->pm.pm_display_cfg;
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index f4ac403b8b36..aabb94796005 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2810,4 +2810,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 174753625bca..7cd902bbd244 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1343,6 +1343,17 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
intel_de_write_fw(display, reg, val);
}
+static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
+ else
+ intel_de_write_fw(display, reg, val);
+}
+
static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
@@ -1357,19 +1368,29 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
lut = blob->data;
/*
- * DSB fails to correctly load the legacy LUT
- * unless we either write each entry twice,
- * or use non-posted writes
+ * DSB fails to correctly load the legacy LUT unless
+ * we either write each entry twice when using posted
+ * writes, or we use non-posted writes.
+ *
+ * If palette anti-collision is active during LUT
+ * register writes:
+ * - posted writes simply get dropped and thus the LUT
+ * contents may not be correctly updated
+ * - non-posted writes are blocked and thus the LUT
+ * contents are always correct, but simultaneous CPU
+ * MMIO access will start to fail
+ *
+ * Choose the lesser of two evils and use posted writes.
+ * Using posted writes is also faster, even when having
+ * to write each register twice.
*/
- if (crtc_state->dsb_color_vblank)
- intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
-
- for (i = 0; i < 256; i++)
+ for (i = 0; i < 256; i++) {
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
-
- if (crtc_state->dsb_color_vblank)
- intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);
+ if (crtc_state->dsb_color_vblank)
+ ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+ }
}
static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
@@ -1458,8 +1479,8 @@ static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
prec_index);
for (i = 0; i < lut_size; i++)
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
/*
* Reset the index, otherwise it prevents the legacy palette to be
@@ -1612,16 +1633,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- DISPLAY_VER(display) >= 14 ?
- mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
+ ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(display) >= 14 ?
+ mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(display))
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- DISPLAY_VER(display) >= 14 ?
- 1 << 24 : 1 << 16);
+ ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(display) >= 14 ?
+ 1 << 24 : 1 << 16);
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@@ -1687,10 +1708,10 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
@@ -1726,10 +1747,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
@@ -1747,10 +1768,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
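
Two things happen in intel_color.c: the legacy 8-bit LUT load drops non-posted writes in favour of writing every entry twice (per the new comment, a posted write dropped by palette anti-collision is simply replayed by the second write), and the bulk LUT loads switch from ilk_lut_write() to the new ilk_lut_write_indexed(). The split implies a rule of thumb, illustrated below with the threshold taken from the intel_dsb.c kernel-doc in the next file (illustrative only, not code from the commit):

    /* Indexed DSB writes only pay off for long bursts to one register. */
    if (burst_len >= 5)     /* "less than 5 or so" per intel_dsb.c */
            ilk_lut_write_indexed(crtc_state, data_reg, val);
    else
            ilk_lut_write(crtc_state, data_reg, val);
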
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index b7b44399adaa..4d3785f5cb52 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -273,16 +273,20 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
}
/**
- * intel_dsb_reg_write() - Emit register write to the DSB context
+ * intel_dsb_reg_write_indexed() - Emit register write to the DSB context
* @dsb: DSB context
* @reg: register address.
* @val: value.
*
* This function is used for writing a register-value pair into the
* DSB command buffer.
+ *
+ * Note that indexed writes are slower than normal MMIO writes
+ * for a small number (less than 5 or so) of writes to the same
+ * register.
*/
-void intel_dsb_reg_write(struct intel_dsb *dsb,
- i915_reg_t reg, u32 val)
+void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
{
/*
* For example the buffer will look like below for 3 dwords for auto
@@ -340,6 +344,15 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
}
}
+void intel_dsb_reg_write(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
+{
+ intel_dsb_emit(dsb, val,
+ (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg));
+}
+
static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
return (!!(mask & 0xff000000) << 3 |
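
With this split, intel_dsb_reg_write_indexed() keeps the old auto-increment indexed-write emission, while the new intel_dsb_reg_write() emits a plain DSB_OPCODE_MMIO_WRITE instruction. A hedged usage sketch (CTRL_REG and DATA_REG are hypothetical registers):

    intel_dsb_reg_write(dsb, CTRL_REG, enable_bits);  /* one-off write */
    for (i = 0; i < lut_size; i++)                    /* long burst */
            intel_dsb_reg_write_indexed(dsb, DATA_REG, lut[i]);
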
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 33e0fc2ab380..da6df07a3c83 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -34,6 +34,8 @@ void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
+void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val);
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
i915_reg_t reg, u32 mask, u32 val);
void intel_dsb_noop(struct intel_dsb *dsb, int count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4eb58887819a..71c0daef1996 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1643,9 +1643,21 @@ capture_engine(struct intel_engine_cs *engine,
return NULL;
intel_engine_get_hung_entity(engine, &ce, &rq);
- if (rq && !i915_request_started(rq))
- drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
- engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
+ if (rq && !i915_request_started(rq)) {
+ /*
+ * We also want to report the guc_id of the context, but if
+ * we don't have the context reference, skip printing it.
+ */
+ if (ce)
+ drm_info(&engine->gt->i915->drm,
+ "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
+ engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
+ else
+ drm_info(&engine->gt->i915->drm,
+ "Got hung context on %s with active request %lld:%lld not yet started\n",
+ engine->name, rq->fence.context, rq->fence.seqno);
+ }
if (rq) {
capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 762127dd56c5..70a854557e6e 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void)
return 0;
err_priorities:
- kmem_cache_destroy(slab_priorities);
+ kmem_cache_destroy(slab_dependencies);
return -ENOMEM;
}
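
The scheduler fix corrects a classic error-path mix-up: when creating slab_priorities fails, the unwind must destroy the cache that was successfully created earlier, not the one that just failed. A reduced sketch of the fixed function, assuming the creation order shown in i915_scheduler.c (cache flags elided):

    int __init i915_scheduler_module_init(void)
    {
            slab_dependencies = KMEM_CACHE(i915_dependency, SLAB_HWCACHE_ALIGN);
            if (!slab_dependencies)
                    return -ENOMEM;

            slab_priorities = KMEM_CACHE(i915_priolist, 0);
            if (!slab_priorities)
                    goto err_priorities;

            return 0;

    err_priorities:
            kmem_cache_destroy(slab_dependencies); /* undo the first alloc */
            return -ENOMEM;
    }
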
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 1a192a2a941b..3bbdb362d6f0 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -224,8 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_PINNED);
if (IS_ERR(tiny)) {
- KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
- PTR_ERR(pt));
+ KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
+ PTR_ERR(tiny));
goto free_pt;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 3cb228c773cd..6146d1776bda 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -65,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
__invalidation_fence_signal(xe, fence);
}
+void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->gt))
+ return;
+
+ __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
+}
+
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
struct xe_gt *gt = container_of(work, struct xe_gt,
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index f430d5797af7..00b1c6c01e8d 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
bool stack);
+void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index f27f579f4d85..797576690356 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1333,8 +1333,7 @@ static void invalidation_fence_cb(struct dma_fence *fence,
queue_work(system_wq, &ifence->work);
} else {
ifence->base.base.error = ifence->fence->error;
- dma_fence_signal(&ifence->base.base);
- dma_fence_put(&ifence->base.base);
+ xe_gt_tlb_invalidation_fence_signal(&ifence->base);
}
dma_fence_put(ifence->fence);
}
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index e1a0e27cda14..c13123008e90 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -27,46 +27,27 @@
#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"
-#define XE_REG_SR_GROW_STEP_DEFAULT 16
-
static void reg_sr_fini(struct drm_device *drm, void *arg)
{
struct xe_reg_sr *sr = arg;
+ struct xe_reg_sr_entry *entry;
+ unsigned long reg;
+
+ xa_for_each(&sr->xa, reg, entry)
+ kfree(entry);
xa_destroy(&sr->xa);
- kfree(sr->pool.arr);
- memset(&sr->pool, 0, sizeof(sr->pool));
}
int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{
xa_init(&sr->xa);
- memset(&sr->pool, 0, sizeof(sr->pool));
- sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
sr->name = name;
return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
-static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
-{
- if (sr->pool.used == sr->pool.allocated) {
- struct xe_reg_sr_entry *arr;
-
- arr = krealloc_array(sr->pool.arr,
- ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
- sizeof(*arr), GFP_KERNEL);
- if (!arr)
- return NULL;
-
- sr->pool.arr = arr;
- sr->pool.allocated += sr->pool.grow_step;
- }
-
- return &sr->pool.arr[sr->pool.used++];
-}
-
static bool compatible_entries(const struct xe_reg_sr_entry *e1,
const struct xe_reg_sr_entry *e2)
{
@@ -112,7 +93,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
return 0;
}
- pentry = alloc_entry(sr);
+ pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
if (!pentry) {
ret = -ENOMEM;
goto fail;
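
xe_reg_sr drops its hand-rolled grow-by-step entry pool and kmalloc's each entry individually, with the xarray as the single owner; teardown becomes a plain walk-and-free. The resulting lifecycle, condensed (names from the hunks above, error handling shortened):

    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry)
            return -ENOMEM;
    ret = xa_err(xa_store(&sr->xa, idx, entry, GFP_KERNEL)); /* insert */

    /* ... at fini time ... */
    xa_for_each(&sr->xa, reg, entry)
            kfree(entry);
    xa_destroy(&sr->xa);
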
diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h
index ad48a52b824a..ebe11f237fa2 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr_types.h
+++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h
@@ -20,12 +20,6 @@ struct xe_reg_sr_entry {
};
struct xe_reg_sr {
- struct {
- struct xe_reg_sr_entry *arr;
- unsigned int used;
- unsigned int allocated;
- unsigned int grow_step;
- } pool;
struct xarray xa;
const char *name;