Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 222
1 file changed, 114 insertions(+), 108 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3f95f7cb4019..621a3b53a038 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -92,17 +92,22 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 		unsigned int chunk_size);
 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
 
+static int kfd_resume(struct kfd_dev *kfd);
+
 static const struct kfd_device_info *lookup_device_info(unsigned short did)
 {
 	size_t i;
 
 	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
 		if (supported_devices[i].did == did) {
-			BUG_ON(supported_devices[i].device_info == NULL);
+			WARN_ON(!supported_devices[i].device_info);
 			return supported_devices[i].device_info;
 		}
 	}
 
+	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
+		 did);
+
 	return NULL;
 }
 
@@ -114,8 +119,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 	const struct kfd_device_info *device_info =
 					lookup_device_info(pdev->device);
 
-	if (!device_info)
+	if (!device_info) {
+		dev_err(kfd_device, "kgd2kfd_probe failed\n");
 		return NULL;
+	}
 
 	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
 	if (!kfd)
@@ -152,33 +159,20 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 	}
 
 	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
-		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+		dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
-		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+									!= 0);
 		return false;
 	}
 
 	pasid_limit = min_t(unsigned int,
-			(unsigned int)1 << kfd->device_info->max_pasid_bits,
+			(unsigned int)(1 << kfd->device_info->max_pasid_bits),
 			iommu_info.max_pasids);
-	/*
-	 * last pasid is used for kernel queues doorbells
-	 * in the future the last pasid might be used for a kernel thread.
-	 */
-	pasid_limit = min_t(unsigned int,
-				pasid_limit,
-				kfd->doorbell_process_limit - 1);
-
-	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
-	if (err < 0) {
-		dev_err(kfd_device, "error initializing iommu device\n");
-		return false;
-	}
 
 	if (!kfd_set_pasid_limit(pasid_limit)) {
 		dev_err(kfd_device, "error setting pasid limit\n");
-		amd_iommu_free_device(kfd->pdev);
 		return false;
 	}
 
@@ -190,7 +184,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
 	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
 
 	if (dev)
-		kfd_unbind_process_from_device(dev, pasid);
+		kfd_process_iommu_unbind_callback(dev, pasid);
 }
 
 /*
@@ -211,9 +205,8 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
 		flags);
 
 	dev = kfd_device_by_pci_dev(pdev);
-	BUG_ON(dev == NULL);
-
-	kfd_signal_iommu_event(dev, pasid, address,
+	if (!WARN_ON(!dev))
+		kfd_signal_iommu_event(dev, pasid, address,
 			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
 
 	return AMD_IOMMU_INV_PRI_RSP_INVALID;
@@ -226,6 +219,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	kfd->shared_resources = *gpu_resources;
 
+	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+			- kfd->vm_info.first_vmid_kfd + 1;
+
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
 
@@ -234,9 +232,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	 * calculate max size of runlist packet.
 	 * There can be only 2 packets at once
 	 */
-	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
-		max_num_of_queues_per_device *
-		sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
+		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+		+ sizeof(struct pm4_mes_runlist)) * 2;
 
 	/* Add size of HIQ & DIQ */
 	size += KFD_KERNEL_QUEUE_SIZE * 2;
 
@@ -247,89 +245,76 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	if (kfd->kfd2kgd->init_gtt_mem_allocation(
 			kfd->kgd, size, &kfd->gtt_mem,
 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
-		dev_err(kfd_device,
-			"Could not allocate %d bytes for device (%x:%x)\n",
-			size, kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
 		goto out;
 	}
 
-	dev_info(kfd_device,
-		"Allocated %d bytes on gart for device(%x:%x)\n",
-		size, kfd->pdev->vendor, kfd->pdev->device);
+	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
 
 	/* Initialize GTT sa with 512 byte chunk size */
 	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
-		dev_err(kfd_device,
-			"Error initializing gtt sub-allocator\n");
+		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
 		goto kfd_gtt_sa_init_error;
 	}
 
-	kfd_doorbell_init(kfd);
-
-	if (kfd_topology_add_device(kfd) != 0) {
+	if (kfd_doorbell_init(kfd)) {
 		dev_err(kfd_device,
-			"Error adding device (%x:%x) to topology\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+			"Error initializing doorbell aperture\n");
+		goto kfd_doorbell_error;
+	}
+
+	if (kfd_topology_add_device(kfd)) {
+		dev_err(kfd_device, "Error adding device to topology\n");
 		goto kfd_topology_add_device_error;
 	}
 
 	if (kfd_interrupt_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing interrupts for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error initializing interrupts\n");
 		goto kfd_interrupt_error;
 	}
 
-	if (!device_iommu_pasid_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing iommuv2 for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
-		goto device_iommu_pasid_error;
-	}
-	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
-						iommu_pasid_shutdown_callback);
-	amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-
 	kfd->dqm = device_queue_manager_init(kfd);
 	if (!kfd->dqm) {
-		dev_err(kfd_device,
-			"Error initializing queue manager for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error initializing queue manager\n");
 		goto device_queue_manager_error;
 	}
 
-	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
+	if (!device_iommu_pasid_init(kfd)) {
 		dev_err(kfd_device,
-			"Error starting queuen manager for device (%x:%x)\n",
+			"Error initializing iommuv2 for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
-		goto dqm_start_error;
+		goto device_iommu_pasid_error;
 	}
 
+	if (kfd_resume(kfd))
+		goto kfd_resume_error;
+
 	kfd->dbgmgr = NULL;
 
 	kfd->init_complete = true;
-	dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
 		 kfd->pdev->device);
 
-	pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+	pr_debug("Starting kfd with the following scheduling policy %d\n",
 		sched_policy);
 
 	goto out;
 
-dqm_start_error:
+kfd_resume_error:
+device_iommu_pasid_error:
 	device_queue_manager_uninit(kfd->dqm);
 device_queue_manager_error:
-	amd_iommu_free_device(kfd->pdev);
-device_iommu_pasid_error:
 	kfd_interrupt_exit(kfd);
 kfd_interrupt_error:
 	kfd_topology_remove_device(kfd);
 kfd_topology_add_device_error:
+	kfd_doorbell_fini(kfd);
+kfd_doorbell_error:
 	kfd_gtt_sa_fini(kfd);
 kfd_gtt_sa_init_error:
 	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
-		"device (%x:%x) NOT added due to errors\n",
+		"device %x:%x NOT added due to errors\n",
 		kfd->pdev->vendor, kfd->pdev->device);
out:
 	return kfd->init_complete;
@@ -338,10 +323,11 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 	if (kfd->init_complete) {
+		kgd2kfd_suspend(kfd);
 		device_queue_manager_uninit(kfd->dqm);
-		amd_iommu_free_device(kfd->pdev);
 		kfd_interrupt_exit(kfd);
 		kfd_topology_remove_device(kfd);
+		kfd_doorbell_fini(kfd);
 		kfd_gtt_sa_fini(kfd);
 		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	}
@@ -351,36 +337,59 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 
 void kgd2kfd_suspend(struct kfd_dev *kfd)
 {
-	BUG_ON(kfd == NULL);
+	if (!kfd->init_complete)
+		return;
 
-	if (kfd->init_complete) {
-		kfd->dqm->ops.stop(kfd->dqm);
-		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
-		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
-		amd_iommu_free_device(kfd->pdev);
-	}
+	kfd->dqm->ops.stop(kfd->dqm);
+
+	kfd_unbind_processes_from_device(kfd);
+
+	amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+	amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+	amd_iommu_free_device(kfd->pdev);
 }
 
 int kgd2kfd_resume(struct kfd_dev *kfd)
 {
-	unsigned int pasid_limit;
-	int err;
+	if (!kfd->init_complete)
+		return 0;
 
-	BUG_ON(kfd == NULL);
+	return kfd_resume(kfd);
 
-	pasid_limit = kfd_get_pasid_limit();
+}
 
-	if (kfd->init_complete) {
-		err = amd_iommu_init_device(kfd->pdev, pasid_limit);
-		if (err < 0)
-			return -ENXIO;
-		amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
-						iommu_pasid_shutdown_callback);
-		amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-		kfd->dqm->ops.start(kfd->dqm);
+static int kfd_resume(struct kfd_dev *kfd)
+{
+	int err = 0;
+	unsigned int pasid_limit = kfd_get_pasid_limit();
+
+	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+	if (err)
+		return -ENXIO;
+	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+					iommu_pasid_shutdown_callback);
+	amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+				     iommu_invalid_ppr_cb);
+
+	err = kfd_bind_processes_to_device(kfd);
+	if (err)
+		goto processes_bind_error;
+
+	err = kfd->dqm->ops.start(kfd->dqm);
+	if (err) {
+		dev_err(kfd_device,
+			"Error starting queue manager for device %x:%x\n",
+			kfd->pdev->vendor, kfd->pdev->device);
+		goto dqm_start_error;
 	}
 
-	return 0;
+	return err;
+
+dqm_start_error:
+processes_bind_error:
+	amd_iommu_free_device(kfd->pdev);
+
+	return err;
 }
 
 /* This is called directly from KGD at ISR. */
@@ -394,7 +403,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 	if (kfd->interrupts_active
 	    && interrupt_is_wanted(kfd, ih_ring_entry)
 	    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
-		schedule_work(&kfd->interrupt_work);
+		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
 	spin_unlock(&kfd->interrupt_lock);
 }
 
@@ -402,26 +411,27 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 				unsigned int chunk_size)
 {
-	unsigned int num_of_bits;
+	unsigned int num_of_longs;
 
-	BUG_ON(!kfd);
-	BUG_ON(!kfd->gtt_mem);
-	BUG_ON(buf_size < chunk_size);
-	BUG_ON(buf_size == 0);
-	BUG_ON(chunk_size == 0);
+	if (WARN_ON(buf_size < chunk_size))
+		return -EINVAL;
+	if (WARN_ON(buf_size == 0))
+		return -EINVAL;
+	if (WARN_ON(chunk_size == 0))
+		return -EINVAL;
 
 	kfd->gtt_sa_chunk_size = chunk_size;
 	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
 
-	num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
-	BUG_ON(num_of_bits == 0);
+	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
+		BITS_PER_LONG;
 
-	kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
 
 	if (!kfd->gtt_sa_bitmap)
 		return -ENOMEM;
 
-	pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
 			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
 
 	mutex_init(&kfd->gtt_sa_lock);
@@ -455,8 +465,6 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 {
 	unsigned int found, start_search, cur_size;
 
-	BUG_ON(!kfd);
-
 	if (size == 0)
 		return -EINVAL;
 
@@ -467,7 +475,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	if ((*mem_obj) == NULL)
 		return -ENOMEM;
 
-	pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
 
 	start_search = 0;
 
@@ -479,7 +487,7 @@ kfd_gtt_restart_search:
 					kfd->gtt_sa_num_of_chunks,
 					start_search);
 
-	pr_debug("kfd: found = %d\n", found);
+	pr_debug("Found = %d\n", found);
 
 	/* If there wasn't any free chunk, bail out */
 	if (found == kfd->gtt_sa_num_of_chunks)
@@ -497,12 +505,12 @@ kfd_gtt_restart_search:
 					found,
 					kfd->gtt_sa_chunk_size);
 
-	pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
 
 	/* If we need only one chunk, mark it as allocated and get out */
 	if (size <= kfd->gtt_sa_chunk_size) {
-		pr_debug("kfd: single bit\n");
+		pr_debug("Single bit\n");
 		set_bit(found, kfd->gtt_sa_bitmap);
 		goto kfd_gtt_out;
 	}
 
@@ -537,7 +545,7 @@ kfd_gtt_restart_search:
 
 	} while (cur_size > 0);
 
-	pr_debug("kfd: range_start = %d, range_end = %d\n",
+	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);
 
 	/* Mark the chunks as allocated */
@@ -551,7 +559,7 @@ kfd_gtt_out:
 	return 0;
 
 kfd_gtt_no_free_chunk:
-	pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
 	mutex_unlock(&kfd->gtt_sa_lock);
 	kfree(mem_obj);
 	return -ENOMEM;
@@ -561,13 +569,11 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
 {
 	unsigned int bit;
 
-	BUG_ON(!kfd);
-
 	/* Act like kfree when trying to free a NULL object */
 	if (!mem_obj)
 		return 0;
 
-	pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);
 
 	mutex_lock(&kfd->gtt_sa_lock);
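
The vm_info bookkeeping added to kgd2kfd_device_init() derives the KFD VMID range from compute_vmid_bitmap with the kernel's 1-based ffs()/fls() helpers. A minimal userspace sketch of the same arithmetic; the 0xFF00 bitmap value and the __builtin stand-ins for ffs()/fls() are illustrative assumptions, not taken from this patch:

#include <stdio.h>

/* Userspace stand-ins for the kernel's 1-based ffs()/fls();
 * both return 0 for an empty bitmap. */
static int ffs_1based(unsigned int x) { return __builtin_ffs(x); }
static int fls_1based(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	/* Hypothetical bitmap: VMIDs 8..15 reserved for compute. */
	unsigned int compute_vmid_bitmap = 0xFF00;

	int first_vmid_kfd = ffs_1based(compute_vmid_bitmap) - 1; /* 8 */
	int last_vmid_kfd  = fls_1based(compute_vmid_bitmap) - 1; /* 15 */
	int vmid_num_kfd   = last_vmid_kfd - first_vmid_kfd + 1;  /* 8 */

	printf("first=%d last=%d num=%d\n",
	       first_vmid_kfd, last_vmid_kfd, vmid_num_kfd);
	return 0;
}

Note that this computation only sees the lowest and highest set bits, so it assumes the compute VMIDs form one contiguous run.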
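The kfd_gtt_sa_init() hunk replaces the old byte-count sizing, whose truncating division lost up to 7 bits and reached 0 for fewer than 8 chunks (hence the old BUG_ON), with a round-up to whole unsigned longs, the unit that set_bit() and find_next_zero_bit() actually operate on. A worked sketch of the two computations; the chunk count of 100 is an arbitrary example:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define BITS_PER_LONG (8 * (unsigned int)sizeof(unsigned long))

int main(void)
{
	unsigned int chunks = 100;	/* arbitrary example chunk count */

	/* Old sizing: truncates to 12 bytes = 96 bits for 100 chunks,
	 * and to 0 bytes for chunks < 8. */
	unsigned int old_bytes = chunks / BITS_PER_BYTE;

	/* New sizing: rounds up to 2 longs = 128 bits on 64-bit. */
	unsigned int num_of_longs =
		(chunks + BITS_PER_LONG - 1) / BITS_PER_LONG;

	printf("old: %u bytes (%u bits), new: %u bytes (%u bits)\n",
	       old_bytes, old_bytes * BITS_PER_BYTE,
	       num_of_longs * (unsigned int)sizeof(unsigned long),
	       num_of_longs * BITS_PER_LONG);
	return 0;
}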
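The reshuffled goto ladder in kgd2kfd_device_init() keeps the usual kernel invariant: each failure label undoes exactly the steps that have already succeeded, in reverse order of initialization. kfd_resume_error and device_iommu_pasid_error share a target because kfd_resume() frees the IOMMU device itself on failure. A compressed, self-contained sketch of the pattern; the step names are hypothetical stand-ins, not the driver's functions:

#include <stdio.h>

/* Hypothetical init/teardown pairs standing in for the driver's steps. */
static int  doorbell_init(void)   { puts("doorbell up");   return 0; }
static void doorbell_fini(void)   { puts("doorbell down"); }
static int  topology_add(void)    { puts("topology up");   return 0; }
static void topology_remove(void) { puts("topology down"); }
static int  resume_step(void)     { puts("resume failed"); return -1; }

static int device_init_sketch(void)
{
	if (doorbell_init())
		goto doorbell_error;
	if (topology_add())
		goto topology_error;
	if (resume_step())	/* fails: unwind runs in reverse order */
		goto resume_error;
	return 0;

resume_error:
	topology_remove();
topology_error:
	doorbell_fini();
doorbell_error:
	return -1;
}

int main(void)
{
	return device_init_sketch() ? 1 : 0;
}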