Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 135 ++++++++++++++++++++++++++++---
 1 file changed, 126 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 3938fca1ea8e..61d94c745672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -50,16 +50,19 @@ static inline bool amdgpu_has_atpx(void) { return false; }
  * This is the main unload function for KMS (all asics).
  * Returns 0 on success.
  */
-int amdgpu_driver_unload_kms(struct drm_device *dev)
+void amdgpu_driver_unload_kms(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 
 	if (adev == NULL)
-		return 0;
+		return;
 
 	if (adev->rmmio == NULL)
 		goto done_free;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
 	if (amdgpu_device_is_px(dev)) {
 		pm_runtime_get_sync(dev->dev);
 		pm_runtime_forbid(dev->dev);
@@ -74,7 +77,6 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
 done_free:
 	kfree(adev);
 	dev->dev_private = NULL;
-	return 0;
 }
 
 /**
@@ -139,6 +141,9 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, true);
+
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -308,10 +313,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++) {
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid) {
-				ip.hw_ip_version_major = adev->ip_blocks[i].major;
-				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid) {
+				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
 				ip.capabilities_flags = 0;
 				ip.available_rings = ring_mask;
 				ip.ib_start_alignment = ib_start_alignment;
@@ -347,8 +352,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++)
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid &&
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid &&
 			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 				count++;
 
@@ -413,6 +418,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &vram_gtt,
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_MEMORY: {
+		struct drm_amdgpu_memory_info mem;
+
+		memset(&mem, 0, sizeof(mem));
+		mem.vram.total_heap_size = adev->mc.real_vram_size;
+		mem.vram.usable_heap_size =
+			adev->mc.real_vram_size - adev->vram_pin_size;
+		mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+		mem.cpu_accessible_vram.total_heap_size =
+			adev->mc.visible_vram_size;
+		mem.cpu_accessible_vram.usable_heap_size =
+			adev->mc.visible_vram_size -
+			(adev->vram_pin_size - adev->invisible_pin_size);
+		mem.cpu_accessible_vram.heap_usage =
+			atomic64_read(&adev->vram_vis_usage);
+		mem.cpu_accessible_vram.max_allocation =
+			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+		mem.gtt.total_heap_size = adev->mc.gtt_size;
+		mem.gtt.usable_heap_size =
+			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+		return copy_to_user(out, &mem,
+				    min((size_t)size, sizeof(mem)))
+				    ? -EFAULT : 0;
+	}
 	case AMDGPU_INFO_READ_MMR_REG: {
 		unsigned n, alloc_size;
 		uint32_t *regs;
@@ -475,6 +510,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+		if (amdgpu_sriov_vf(adev))
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +531,71 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+		unsigned i;
+		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+		struct amd_vce_state *vce_state;
+
+		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+			if (vce_state) {
+				vce_clk_table.entries[i].sclk = vce_state->sclk;
+				vce_clk_table.entries[i].mclk = vce_state->mclk;
+				vce_clk_table.entries[i].eclk = vce_state->evclk;
+				vce_clk_table.num_valid_entries++;
+			}
+		}
+
+		return copy_to_user(out, &vce_clk_table,
+				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_VBIOS: {
+		uint32_t bios_size = adev->bios_size;
+
+		switch (info->vbios_info.type) {
+		case AMDGPU_INFO_VBIOS_SIZE:
+			return copy_to_user(out, &bios_size,
+					min((size_t)size, sizeof(bios_size)))
+					? -EFAULT : 0;
+		case AMDGPU_INFO_VBIOS_IMAGE: {
+			uint8_t *bios;
+			uint32_t bios_offset = info->vbios_info.offset;
+
+			if (bios_offset >= bios_size)
+				return -EINVAL;
+
+			bios = adev->bios + bios_offset;
+			return copy_to_user(out, bios,
+					min((size_t)size, (size_t)(bios_size - bios_offset)))
+					? -EFAULT : 0;
+		}
+		default:
+			DRM_DEBUG_KMS("Invalid request %d\n",
+					info->vbios_info.type);
+			return -EINVAL;
+		}
+	}
+	case AMDGPU_INFO_NUM_HANDLES: {
+		struct drm_amdgpu_info_num_handles handle;
+
+		switch (info->query_hw_ip.type) {
+		case AMDGPU_HW_IP_UVD:
+			/* Starting Polaris, we support unlimited UVD handles */
+			if (adev->asic_type < CHIP_POLARIS10) {
+				handle.uvd_max_handles = adev->uvd.max_handles;
+				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
+
+				return copy_to_user(out, &handle,
+					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
+			} else {
+				return -ENODATA;
+			}
+
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
@@ -553,6 +655,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		goto out_suspend;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_map_static_csa(adev, &fpriv->vm);
+		if (r)
+			goto out_suspend;
+	}
+
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
 
@@ -591,6 +699,14 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	amdgpu_uvd_free_handles(adev, file_priv);
 	amdgpu_vce_free_handles(adev, file_priv);
 
+	if (amdgpu_sriov_vf(adev)) {
+		/* TODO: how to handle reserve failure */
+		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+		amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
+		fpriv->vm.csa_bo_va = NULL;
+		amdgpu_bo_unreserve(adev->virt.csa_obj);
+	}
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -775,6 +891,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
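
For context on the AMDGPU_INFO_MEMORY query added above, the following is a minimal
userspace sketch (not part of this patch) that exercises it through the raw
DRM_IOCTL_AMDGPU_INFO ioctl. It assumes a kernel carrying this patch, the matching
amdgpu_drm.h UAPI header (shipped as <drm/amdgpu_drm.h> or <libdrm/amdgpu_drm.h>
depending on the distribution), and an amdgpu render node at /dev/dri/renderD128,
which varies per system.

/* query_memory.c - hypothetical sketch, not part of this patch */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>   /* may be <libdrm/amdgpu_drm.h> on some distros */

int main(void)
{
	struct drm_amdgpu_memory_info mem;
	struct drm_amdgpu_info request;
	/* Node path is an assumption; pick the amdgpu render node on your box. */
	int fd = open("/dev/dri/renderD128", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&request, 0, sizeof(request));
	memset(&mem, 0, sizeof(mem));
	request.return_pointer = (uintptr_t)&mem;
	request.return_size = sizeof(mem);
	request.query = AMDGPU_INFO_MEMORY;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) < 0) {
		perror("DRM_IOCTL_AMDGPU_INFO");
		close(fd);
		return 1;
	}

	/* Mirrors the kernel's three heaps: vram, cpu_accessible_vram, gtt. */
	printf("VRAM: total %llu usable %llu used %llu max alloc %llu\n",
	       (unsigned long long)mem.vram.total_heap_size,
	       (unsigned long long)mem.vram.usable_heap_size,
	       (unsigned long long)mem.vram.heap_usage,
	       (unsigned long long)mem.vram.max_allocation);
	printf("GTT:  total %llu usable %llu used %llu\n",
	       (unsigned long long)mem.gtt.total_heap_size,
	       (unsigned long long)mem.gtt.usable_heap_size,
	       (unsigned long long)mem.gtt.heap_usage);
	close(fd);
	return 0;
}

Note that the kernel copies out min(return_size, sizeof(mem)) bytes, so older
userspace with a smaller struct keeps working; passing the full struct size as
return_size, as here, gets all three heaps.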
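Similarly, a sketch of the two-step AMDGPU_INFO_VBIOS flow implemented above:
query AMDGPU_INFO_VBIOS_SIZE first, then fetch the image with
AMDGPU_INFO_VBIOS_IMAGE. Same assumptions as the previous example; as the kernel
side shows, the image copy is clamped to min(return_size, bios_size - offset)
and an offset past the end returns -EINVAL.

/* dump_vbios.c - hypothetical sketch, not part of this patch */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

int main(void)
{
	struct drm_amdgpu_info request;
	uint32_t bios_size = 0;
	uint8_t *bios;
	int fd = open("/dev/dri/renderD128", O_RDONLY);  /* assumed node path */

	if (fd < 0)
		return 1;

	/* Step 1: AMDGPU_INFO_VBIOS_SIZE returns a single uint32_t. */
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&bios_size;
	request.return_size = sizeof(bios_size);
	request.query = AMDGPU_INFO_VBIOS;
	request.vbios_info.type = AMDGPU_INFO_VBIOS_SIZE;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) < 0 || !bios_size)
		return 1;

	/* Step 2: AMDGPU_INFO_VBIOS_IMAGE copies the image from the given
	 * offset (0 here) into the user buffer. */
	bios = malloc(bios_size);
	if (!bios)
		return 1;
	request.return_pointer = (uintptr_t)bios;
	request.return_size = bios_size;
	request.vbios_info.type = AMDGPU_INFO_VBIOS_IMAGE;
	request.vbios_info.offset = 0;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) < 0)
		return 1;

	printf("read %u bytes of VBIOS\n", bios_size);
	free(bios);
	close(fd);
	return 0;
}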