Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
22 files changed, 248 insertions(+), 184 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 44f62fda4022..0283e2b3c851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1801,8 +1801,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                   u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
                                  struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0d8c3fc6eace..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
                                struct acpi_bus_event *event)
 {
     struct amdgpu_atif *atif = adev->atif;
-    struct atif_sbios_requests req;
     int count;
 
     DRM_DEBUG_DRIVER("event, device_class = %s,  type = %#x\n",
@@ -379,42 +378,48 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
         /* Not our event */
         return NOTIFY_DONE;
 
-    /* Check pending SBIOS requests */
-    count = amdgpu_atif_get_sbios_requests(atif, &req);
+    if (atif->functions.sbios_requests) {
+        struct atif_sbios_requests req;
 
-    if (count <= 0)
-        return NOTIFY_DONE;
+        /* Check pending SBIOS requests */
+        count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+        if (count <= 0)
+            return NOTIFY_DONE;
 
-    DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+        DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-    if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
-        struct amdgpu_encoder *enc = atif->encoder_for_bl;
+        /* todo: add DC handling */
+        if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+            !amdgpu_device_has_dc_support(adev)) {
+            struct amdgpu_encoder *enc = atif->encoder_for_bl;
 
-        if (enc) {
-            struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+            if (enc) {
+                struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
 
-            DRM_DEBUG_DRIVER("Changing brightness to %d\n",
-                             req.backlight_level);
+                DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+                                 req.backlight_level);
 
-            amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+                amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-            backlight_force_update(dig->bl_dev,
-                                   BACKLIGHT_UPDATE_HOTKEY);
+                backlight_force_update(dig->bl_dev,
+                                       BACKLIGHT_UPDATE_HOTKEY);
 #endif
+            }
         }
-    }
 
-    if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-        if ((adev->flags & AMD_IS_PX) &&
-            amdgpu_atpx_dgpu_req_power_for_displays()) {
-            pm_runtime_get_sync(adev->ddev->dev);
-            /* Just fire off a uevent and let userspace tell us what to do */
-            drm_helper_hpd_irq_event(adev->ddev);
-            pm_runtime_mark_last_busy(adev->ddev->dev);
-            pm_runtime_put_autosuspend(adev->ddev->dev);
+        if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+            if ((adev->flags & AMD_IS_PX) &&
+                amdgpu_atpx_dgpu_req_power_for_displays()) {
+                pm_runtime_get_sync(adev->ddev->dev);
+                /* Just fire off a uevent and let userspace tell us what to do */
+                drm_helper_hpd_irq_event(adev->ddev);
+                pm_runtime_mark_last_busy(adev->ddev->dev);
+                pm_runtime_put_autosuspend(adev->ddev->dev);
+            }
         }
+        /* TODO: check other events */
     }
-    /* TODO: check other events */
 
     /* We've handled the event, stop the notifier chain. The ACPI interface
      * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
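The amdgpu_acpi.c hunk above narrows the ATIF notify handler in two ways: the SBIOS-request query now only runs when the platform actually advertises that function, and the legacy backlight path is skipped when DC (the new display code) drives the panel. A minimal sketch of that capability-gating pattern, condensing the control flow of the hunk; the surrounding driver context is assumed and error handling is trimmed:

/* Sketch only: mirrors the guarded dispatch in amdgpu_atif_handler().
 * amdgpu_atif and atif_sbios_requests come from the driver itself;
 * nothing here is new API.
 */
static int atif_handler_sketch(struct amdgpu_device *adev)
{
    struct amdgpu_atif *atif = adev->atif;
    struct atif_sbios_requests req;
    int count;

    if (!atif->functions.sbios_requests)
        return NOTIFY_DONE;    /* platform can't report requests at all */

    count = amdgpu_atif_get_sbios_requests(atif, &req);
    if (count <= 0)
        return NOTIFY_DONE;

    /* legacy brightness handling only when DC is not in charge */
    if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
        !amdgpu_device_has_dc_support(adev))
        ;    /* set the backlight level as in the hunk above */

    return NOTIFY_BAD;    /* event consumed, stop the notifier chain */
}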
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 079af8ac2636..fa38a960ce00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
          "Called with userptr BO"))
         return -EINVAL;
 
-    amdgpu_ttm_placement_from_domain(bo, domain);
+    amdgpu_bo_placement_from_domain(bo, domain);
 
     ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
     if (ret)
@@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
         pr_err("%s: Failed to reserve BO\n", __func__);
         goto release_out;
     }
-    amdgpu_ttm_placement_from_domain(bo, mem->domain);
+    amdgpu_bo_placement_from_domain(bo, mem->domain);
     ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
     if (ret)
         pr_err("%s: failed to validate BO\n", __func__);
@@ -1680,7 +1680,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
         if (amdgpu_bo_reserve(bo, true))
             return -EAGAIN;
 
-        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         amdgpu_bo_unreserve(bo);
         if (ret) {
@@ -1824,7 +1824,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
         if (mem->user_pages[0]) {
             amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                          mem->user_pages);
-            amdgpu_ttm_placement_from_domain(bo, mem->domain);
+            amdgpu_bo_placement_from_domain(bo, mem->domain);
             ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
             if (ret) {
                 pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..178d9ce4eba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -419,7 +419,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
     }
 
 retry:
-    amdgpu_ttm_placement_from_domain(bo, domain);
+    amdgpu_bo_placement_from_domain(bo, domain);
     r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
     p->bytes_moved += ctx.bytes_moved;
@@ -478,7 +478,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
         update_bytes_moved_vis =
             !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
             amdgpu_bo_in_cpu_visible_vram(bo);
-        amdgpu_ttm_placement_from_domain(bo, other);
+        amdgpu_bo_placement_from_domain(bo, other);
         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         p->bytes_moved += ctx.bytes_moved;
         if (update_bytes_moved_vis)
@@ -532,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
         /* Check if we have user pages and nobody bound the BO already */
         if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
             lobj->user_pages) {
-            amdgpu_ttm_placement_from_domain(bo,
-                                             AMDGPU_GEM_DOMAIN_CPU);
+            amdgpu_bo_placement_from_domain(bo,
+                                            AMDGPU_GEM_DOMAIN_CPU);
             r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
             if (r)
                 return r;
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
     job = p->job;
     p->job = NULL;
 
-    r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+    r = drm_sched_job_init(&job->base, entity, p->filp);
     if (r) {
         amdgpu_job_free(job);
         amdgpu_mn_unlock(p->mn);
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
     priority = job->base.s_priority;
     drm_sched_entity_push_job(&job->base, entity);
 
-    ring = to_amdgpu_ring(entity->sched);
+    ring = to_amdgpu_ring(entity->rq->sched);
     amdgpu_ring_priority_get(ring, priority);
 
     ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
@@ -1655,7 +1655,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
     if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
         (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-        amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+        amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
         r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
         if (r)
             return r;
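All of the mechanical amdgpu_ttm_placement_from_domain to amdgpu_bo_placement_from_domain renames in the two files above serve the same idiom: fill the BO's placement list for a target domain, then let TTM migrate the buffer to satisfy it. A condensed sketch of that reserve/validate/unreserve sequence, assuming the amdgpu/TTM APIs of this era as shown in the diff:

/* Sketch of the placement-then-validate idiom the renamed helper serves. */
static int move_bo_to_domain_sketch(struct amdgpu_bo *bo, u32 domain)
{
    struct ttm_operation_ctx ctx = { .interruptible = true };
    int r;

    r = amdgpu_bo_reserve(bo, true);
    if (r)
        return r;

    /* fill bo->placement from the requested domain ... */
    amdgpu_bo_placement_from_domain(bo, domain);
    /* ... then let TTM move the BO so it satisfies that placement */
    r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

    amdgpu_bo_unreserve(bo);
    return r;
}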
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
     for (j = 0; j < i; j++)
-        drm_sched_entity_destroy(&adev->rings[j]->sched,
-                                 &ctx->rings[j].entity);
+        drm_sched_entity_destroy(&ctx->rings[j].entity);
     kfree(ctx->fences);
     ctx->fences = NULL;
     return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
         if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
             continue;
 
-        drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-            &ctx->rings[i].entity);
+        drm_sched_entity_destroy(&ctx->rings[i].entity);
     }
 
     amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
             if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                 continue;
 
-            max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-                      &ctx->rings[i].entity, max_wait);
+            max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+                                              max_wait);
         }
     }
     mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
                 continue;
 
             if (kref_read(&ctx->refcount) == 1)
-                drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-                                      &ctx->rings[i].entity);
+                drm_sched_entity_fini(&ctx->rings[i].entity);
             else
                 DRM_ERROR("ctx %p is still alive\n", ctx);
         }
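The amdgpu_cs.c and amdgpu_ctx.c hunks track a GPU-scheduler rework: drm_sched_job_init(), drm_sched_entity_destroy(), _fini() and _flush() no longer take a scheduler argument, because an entity now reaches its scheduler through its run queue (entity->rq->sched). A sketch of the resulting submission pattern, condensed from the amdgpu_job_submit() hunk later in this diff; fence handling is omitted:

/* Sketch of the reworked drm_sched submission flow in this tree. */
static int submit_with_entity_sketch(struct amdgpu_job *job,
                                     struct drm_sched_entity *entity,
                                     void *owner)
{
    struct amdgpu_ring *ring;
    int r;

    /* no scheduler argument anymore: the entity knows its run queue */
    r = drm_sched_job_init(&job->base, entity, owner);
    if (r)
        return r;

    drm_sched_entity_push_job(&job->base, entity);

    /* the ring is recovered from the entity, not passed in by the caller */
    ring = to_amdgpu_ring(entity->rq->sched);
    amdgpu_ring_priority_get(ring, job->base.s_priority);
    return 0;
}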
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 386a7b34d2f4..ec53d8f96d06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1926,7 +1926,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 }
 
 /**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
  *
  * @adev: amdgpu_device pointer
 *
@@ -1936,7 +1936,55 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
  * in each IP into a state suitable for suspend.
  * Returns 0 on success, negative error code on failure.
  */
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+{
+    int i, r;
+
+    if (amdgpu_sriov_vf(adev))
+        amdgpu_virt_request_full_gpu(adev, false);
+
+    for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+        if (!adev->ip_blocks[i].status.valid)
+            continue;
+        /* displays are handled separately */
+        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+            /* ungate blocks so that suspend can properly shut them down */
+            if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+                r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+                                                                             AMD_CG_STATE_UNGATE);
+                if (r) {
+                    DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+                              adev->ip_blocks[i].version->funcs->name, r);
+                }
+            }
+            /* XXX handle errors */
+            r = adev->ip_blocks[i].version->funcs->suspend(adev);
+            /* XXX handle errors */
+            if (r) {
+                DRM_ERROR("suspend of IP block <%s> failed %d\n",
+                          adev->ip_blocks[i].version->funcs->name, r);
+            }
+        }
+    }
+
+    if (amdgpu_sriov_vf(adev))
+        amdgpu_virt_release_full_gpu(adev, false);
+
+    return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs.  The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run.  suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
     int i, r;
 
@@ -1957,6 +2005,9 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
         if (!adev->ip_blocks[i].status.valid)
             continue;
+        /* displays are handled in phase1 */
+        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+            continue;
         /* ungate blocks so that suspend can properly shut them down */
         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
             adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1982,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
     return 0;
 }
 
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs.  The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run.  suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+    int r;
+
+    r = amdgpu_device_ip_suspend_phase1(adev);
+    if (r)
+        return r;
+    r = amdgpu_device_ip_suspend_phase2(adev);
+
+    return r;
+}
+
 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 {
     int i, r;
@@ -2004,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
             continue;
 
         r = block->version->funcs->hw_init(adev);
-        DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+        DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
         if (r)
             return r;
     }
@@ -2039,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
                 continue;
 
             r = block->version->funcs->hw_init(adev);
-            DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+            DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
             if (r)
                 return r;
         }
@@ -2628,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
     drm_kms_helper_poll_disable(dev);
 
+    if (fbcon)
+        amdgpu_fbdev_set_suspend(adev, 1);
+
     if (!amdgpu_device_has_dc_support(adev)) {
         /* turn off display hw */
         drm_modeset_lock_all(dev);
@@ -2635,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
             drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
         }
         drm_modeset_unlock_all(dev);
-    }
-
-    amdgpu_amdkfd_suspend(adev);
-
-    /* unpin the front buffers and cursors */
-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct drm_framebuffer *fb = crtc->primary->fb;
-        struct amdgpu_bo *robj;
-
-        if (amdgpu_crtc->cursor_bo) {
-            struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-            r = amdgpu_bo_reserve(aobj, true);
-            if (r == 0) {
-                amdgpu_bo_unpin(aobj);
-                amdgpu_bo_unreserve(aobj);
+        /* unpin the front buffers and cursors */
+        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+            struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+            struct drm_framebuffer *fb = crtc->primary->fb;
+            struct amdgpu_bo *robj;
+
+            if (amdgpu_crtc->cursor_bo) {
+                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                r = amdgpu_bo_reserve(aobj, true);
+                if (r == 0) {
+                    amdgpu_bo_unpin(aobj);
+                    amdgpu_bo_unreserve(aobj);
+                }
             }
-        }
-        if (fb == NULL || fb->obj[0] == NULL) {
-            continue;
-        }
-        robj = gem_to_amdgpu_bo(fb->obj[0]);
-        /* don't unpin kernel fb objects */
-        if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-            r = amdgpu_bo_reserve(robj, true);
-            if (r == 0) {
-                amdgpu_bo_unpin(robj);
-                amdgpu_bo_unreserve(robj);
+            if (fb == NULL || fb->obj[0] == NULL) {
+                continue;
+            }
+            robj = gem_to_amdgpu_bo(fb->obj[0]);
+            /* don't unpin kernel fb objects */
+            if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+                r = amdgpu_bo_reserve(robj, true);
+                if (r == 0) {
+                    amdgpu_bo_unpin(robj);
+                    amdgpu_bo_unreserve(robj);
+                }
             }
         }
     }
+
+    amdgpu_amdkfd_suspend(adev);
+
+    r = amdgpu_device_ip_suspend_phase1(adev);
+
     /* evict vram memory */
     amdgpu_bo_evict_vram(adev);
 
     amdgpu_fence_driver_suspend(adev);
 
-    r = amdgpu_device_ip_suspend(adev);
+    r = amdgpu_device_ip_suspend_phase2(adev);
 
     /* evict remaining vram memory
      * This second call to evict vram is to evict the gart page table
@@ -2691,11 +2770,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
             DRM_ERROR("amdgpu asic reset failed\n");
     }
 
-    if (fbcon) {
-        console_lock();
-        amdgpu_fbdev_set_suspend(adev, 1);
-        console_unlock();
-    }
     return 0;
 }
 
@@ -2720,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
     if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
         return 0;
 
-    if (fbcon)
-        console_lock();
-
     if (resume) {
         pci_set_power_state(dev->pdev, PCI_D0);
         pci_restore_state(dev->pdev);
         r = pci_enable_device(dev->pdev);
         if (r)
-            goto unlock;
+            return r;
     }
 
     /* post card */
@@ -2741,28 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
     r = amdgpu_device_ip_resume(adev);
     if (r) {
         DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
-        goto unlock;
+        return r;
     }
     amdgpu_fence_driver_resume(adev);
 
     r = amdgpu_device_ip_late_init(adev);
     if (r)
-        goto unlock;
-
-    /* pin cursors */
-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-        if (amdgpu_crtc->cursor_bo) {
-            struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-            r = amdgpu_bo_reserve(aobj, true);
-            if (r == 0) {
-                r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-                if (r != 0)
-                    DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
-                amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-                amdgpu_bo_unreserve(aobj);
+        return r;
+
+    if (!amdgpu_device_has_dc_support(adev)) {
+        /* pin cursors */
+        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+            struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+            if (amdgpu_crtc->cursor_bo) {
+                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                r = amdgpu_bo_reserve(aobj, true);
+                if (r == 0) {
+                    r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+                    if (r != 0)
+                        DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                    amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+                    amdgpu_bo_unreserve(aobj);
+                }
             }
         }
     }
@@ -2783,6 +2856,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
         }
         drm_modeset_unlock_all(dev);
     }
+    amdgpu_fbdev_set_suspend(adev, 0);
     }
 
     drm_kms_helper_poll_enable(dev);
@@ -2806,15 +2880,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
     dev->dev->power.disable_depth--;
 #endif
-
-    if (fbcon)
-        amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
-    if (fbcon)
-        console_unlock();
-
-    return r;
+    return 0;
 }
 
 /**
@@ -3091,7 +3157,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
  * @adev: amdgpu device pointer
  *
  * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
  */
 static int amdgpu_device_reset(struct amdgpu_device *adev)
 {
@@ -3169,7 +3235,7 @@ out:
  * @from_hypervisor: request from hypervisor
 *
 * do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
  */
 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
                                      bool from_hypervisor)
@@ -3294,7 +3360,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
     } else {
-        dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+        dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
     }
 
     amdgpu_vf_error_trans_all(adev);
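The amdgpu_device.c changes split IP suspend into two phases so that display hardware (DCE) is quiesced, and fbdev is put to sleep, before framebuffers are unpinned and VRAM is evicted; everything else is suspended afterwards. A condensed sketch of the ordering amdgpu_device_suspend() ends up with, error handling and the display/unpin details trimmed:

/* Sketch of the resulting suspend ordering; both phase helpers are
 * static functions inside amdgpu_device.c as added above.
 */
static int suspend_ordering_sketch(struct amdgpu_device *adev)
{
    amdgpu_device_ip_suspend_phase1(adev); /* DCE blocks only: scanout stops */
    amdgpu_bo_evict_vram(adev);            /* now safe to move BOs out of VRAM */
    amdgpu_fence_driver_suspend(adev);
    amdgpu_device_ip_suspend_phase2(adev); /* all remaining IP blocks */
    amdgpu_bo_evict_vram(adev);            /* second pass catches the GART table */
    return 0;
}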
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index d44b76455e89..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -373,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
     if (adev->mode_info.rfbdev)
-        drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
-            state);
+        drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+                                           state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bcbdcf997d20..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
         if (r)
             goto free_pages;
 
-        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         amdgpu_bo_unreserve(bo);
         if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
     if (!f)
         return -EINVAL;
 
-    r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+    r = drm_sched_job_init(&job->base, entity, owner);
     if (r)
         return r;
 
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
     priority = job->base.s_priority;
     drm_sched_entity_push_job(&job->base, entity);
 
-    ring = to_amdgpu_ring(entity->sched);
+    ring = to_amdgpu_ring(entity->rq->sched);
     amdgpu_ring_priority_get(ring, priority);
 
     return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                                struct drm_sched_entity *s_entity)
 {
-    struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+    struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
     struct amdgpu_job *job = to_amdgpu_job(sched_job);
     struct amdgpu_vm *vm = job->vm;
     struct dma_fence *fence;
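The amdgpu_fb.c hunk is what lets the console_lock()/console_unlock() pairs disappear from the suspend/resume paths above: drm_fb_helper_set_suspend_unlocked() handles console locking internally (as I read the helper, it defers to a worker when the lock is contended rather than blocking), so the driver no longer risks stalling suspend on a held console lock. The wrapper reduces to a guarded one-liner:

/* Sketch of the resulting wrapper, identical in spirit to the hunk above. */
void amdgpu_fbdev_set_suspend_sketch(struct amdgpu_device *adev, int state)
{
    if (adev->mode_info.rfbdev)    /* fbdev emulation may be disabled */
        drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
                                           state);
}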
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 207f238649b4..c7dce14fd47d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     struct drm_crtc *crtc;
     uint32_t ui32 = 0;
     uint64_t ui64 = 0;
-    int i, j, found;
+    int i, found;
     int ui32_size = sizeof(ui32);
 
     if (!info->return_size || !info->return_pointer)
@@ -328,64 +328,61 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         case AMDGPU_HW_IP_GFX:
             type = AMD_IP_BLOCK_TYPE_GFX;
             for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
+                ring_mask |= adev->gfx.gfx_ring[i].ready << i;
             ib_start_alignment = 32;
             ib_size_alignment = 32;
             break;
         case AMDGPU_HW_IP_COMPUTE:
             type = AMD_IP_BLOCK_TYPE_GFX;
             for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
+                ring_mask |= adev->gfx.compute_ring[i].ready << i;
             ib_start_alignment = 32;
             ib_size_alignment = 32;
             break;
         case AMDGPU_HW_IP_DMA:
             type = AMD_IP_BLOCK_TYPE_SDMA;
             for (i = 0; i < adev->sdma.num_instances; i++)
-                ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
+                ring_mask |= adev->sdma.instance[i].ring.ready << i;
             ib_start_alignment = 256;
             ib_size_alignment = 4;
             break;
         case AMDGPU_HW_IP_UVD:
             type = AMD_IP_BLOCK_TYPE_UVD;
-            for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-                ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
+            ring_mask |= adev->uvd.inst[0].ring.ready;
             ib_start_alignment = 64;
             ib_size_alignment = 64;
             break;
         case AMDGPU_HW_IP_VCE:
             type = AMD_IP_BLOCK_TYPE_VCE;
             for (i = 0; i < adev->vce.num_rings; i++)
-                ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
+                ring_mask |= adev->vce.ring[i].ready << i;
             ib_start_alignment = 4;
             ib_size_alignment = 1;
             break;
         case AMDGPU_HW_IP_UVD_ENC:
             type = AMD_IP_BLOCK_TYPE_UVD;
-            for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-                for (j = 0; j < adev->uvd.num_enc_rings; j++)
-                    ring_mask |=
-                    ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
-                    (j + i * adev->uvd.num_enc_rings));
+            for (i = 0; i < adev->uvd.num_enc_rings; i++)
+                ring_mask |=
+                    adev->uvd.inst[0].ring_enc[i].ready << i;
             ib_start_alignment = 64;
             ib_size_alignment = 64;
             break;
         case AMDGPU_HW_IP_VCN_DEC:
             type = AMD_IP_BLOCK_TYPE_VCN;
-            ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
+            ring_mask = adev->vcn.ring_dec.ready;
             ib_start_alignment = 16;
             ib_size_alignment = 16;
             break;
         case AMDGPU_HW_IP_VCN_ENC:
             type = AMD_IP_BLOCK_TYPE_VCN;
             for (i = 0; i < adev->vcn.num_enc_rings; i++)
-                ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
+                ring_mask |= adev->vcn.ring_enc[i].ready << i;
             ib_start_alignment = 64;
             ib_size_alignment = 1;
             break;
         case AMDGPU_HW_IP_VCN_JPEG:
             type = AMD_IP_BLOCK_TYPE_VCN;
-            ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
+            ring_mask = adev->vcn.ring_jpeg.ready;
             ib_start_alignment = 16;
             ib_size_alignment = 16;
             break;
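The amdgpu_kms.c hunk drops the "? 1 : 0" when packing ring readiness into a bitmask. That is only safe because ring.ready is a C bool, which always converts to exactly 0 or 1; with a plain int flag the ternary would still be required. A self-contained illustration (the struct here is a hypothetical stand-in, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring { bool ready; };

static uint32_t build_ring_mask(const struct ring *rings, int n)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < n; i++)
        mask |= (uint32_t)rings[i].ready << i;    /* bool promotes to 0 or 1 */
    return mask;
}

int main(void)
{
    struct ring rings[4] = { { true }, { false }, { true }, { true } };

    printf("0x%x\n", build_ring_mask(rings, 4));    /* prints 0xd */
    return 0;
}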
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b12526ce1a9d..21bfa2d8039e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -51,7 +51,7 @@
  *
  */
 
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
 {
     if (adev->flags & AMD_IS_APU)
         return false;
@@ -84,12 +84,12 @@ static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
     }
 }
 
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
     struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
-    if (WARN_ON_ONCE(bo->pin_count > 0))
+    if (bo->pin_count > 0)
         amdgpu_bo_subtract_pin_size(bo);
 
     if (bo->kfd_bo)
@@ -111,7 +111,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 }
 
 /**
- * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
  * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
@@ -120,22 +120,22 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
  * Returns:
  * true if the object belongs to &amdgpu_bo, false if not.
  */
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 {
-    if (bo->destroy == &amdgpu_ttm_bo_destroy)
+    if (bo->destroy == &amdgpu_bo_destroy)
         return true;
     return false;
 }
 
 /**
- * amdgpu_ttm_placement_from_domain - set buffer's placement
+ * amdgpu_bo_placement_from_domain - set buffer's placement
  * @abo: &amdgpu_bo buffer object whose placement is to be set
  * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
     struct ttm_placement *placement = &abo->placement;
@@ -216,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
         c++;
     }
 
+    BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
     placement->num_placement = c;
     placement->placement = places;
 
@@ -488,13 +490,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 #endif
 
     bo->tbo.bdev = &adev->mman.bdev;
-    amdgpu_ttm_placement_from_domain(bo, bp->domain);
+    amdgpu_bo_placement_from_domain(bo, bp->domain);
     if (bp->type == ttm_bo_type_kernel)
         bo->tbo.priority = 1;
 
     r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
                              &bo->placement, page_align, &ctx, acc_size,
-                             NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+                             NULL, bp->resv, &amdgpu_bo_destroy);
     if (unlikely(r != 0))
         return r;
 
@@ -594,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
     if (r)
         return r;
 
-    if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+    if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
         if (!bp->resv)
             WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                             NULL));
@@ -682,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
     domain = bo->preferred_domains;
 
 retry:
-    amdgpu_ttm_placement_from_domain(bo, domain);
+    amdgpu_bo_placement_from_domain(bo, domain);
     r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
     if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
         domain = bo->allowed_domains;
@@ -915,7 +917,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
     /* force to pin into visible video ram */
     if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
         bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-    amdgpu_ttm_placement_from_domain(bo, domain);
+    amdgpu_bo_placement_from_domain(bo, domain);
     for (i = 0; i < bo->placement.num_placement; i++) {
         unsigned fpfn, lpfn;
 
@@ -1246,7 +1248,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
     struct amdgpu_bo *abo;
     struct ttm_mem_reg *old_mem = &bo->mem;
 
-    if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+    if (!amdgpu_bo_is_amdgpu_bo(bo))
         return;
 
     abo = ttm_to_amdgpu_bo(bo);
@@ -1263,7 +1265,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
         return;
 
     /* move_notify is called before move happens */
-    trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+    trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 /**
@@ -1285,7 +1287,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
     unsigned long offset, size;
     int r;
 
-    if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+    if (!amdgpu_bo_is_amdgpu_bo(bo))
         return 0;
 
     abo = ttm_to_amdgpu_bo(bo);
@@ -1307,8 +1309,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
     /* hurrah the memory is not visible ! */
     atomic64_inc(&adev->num_vram_cpu_page_faults);
-    amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
-                                     AMDGPU_GEM_DOMAIN_GTT);
+    amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+                                    AMDGPU_GEM_DOMAIN_GTT);
 
     /* Avoid costly evictions; only set GTT as a busy placement */
     abo->placement.num_busy_placement = 1;
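amdgpu_bo_validate() in the file above shows the domain-fallback idiom used throughout this patch set: try the BO's preferred domains first and, if TTM reports -ENOMEM, retry once with the wider allowed_domains. Condensed sketch, using the same APIs as the hunk:

/* Sketch of the retry-with-wider-domains pattern from amdgpu_bo_validate(). */
static int validate_with_fallback_sketch(struct amdgpu_bo *bo)
{
    struct ttm_operation_ctx ctx = { .interruptible = false };
    u32 domain = bo->preferred_domains;
    int r;

retry:
    amdgpu_bo_placement_from_domain(bo, domain);
    r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
        domain = bo->allowed_domains;    /* widen and try exactly once more */
        goto retry;
    }
    return r;
}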
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9c3e29a04eb1..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
 #include "amdgpu.h"
 
 #define AMDGPU_BO_INVALID_OFFSET    LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS    3
 
 struct amdgpu_bo_param {
     unsigned long            size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
     /* Protected by tbo.reserved */
     u32                preferred_domains;
     u32                allowed_domains;
-    struct ttm_place        placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+    struct ttm_place        placements[AMDGPU_BO_MAX_PLACEMENTS];
     struct ttm_placement        placement;
     struct ttm_buffer_object    tbo;
     struct ttm_bo_kmap_obj        kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
     return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
 int amdgpu_bo_create(struct amdgpu_device *adev,
                      struct amdgpu_bo_param *bp,
                      struct amdgpu_bo **bo_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3ed02f472003..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -323,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
         return ret;
 
     if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
-        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
     }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index ea9850c9224d..d8357290ad09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
                                u32 ring,
                                struct amdgpu_ring **out_ring)
 {
-    u32 instance;
-
     switch (mapper->hw_ip) {
     case AMDGPU_HW_IP_GFX:
         *out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
         *out_ring = &adev->sdma.instance[ring].ring;
         break;
     case AMDGPU_HW_IP_UVD:
-        instance = ring;
-        *out_ring = &adev->uvd.inst[instance].ring;
+        *out_ring = &adev->uvd.inst[0].ring;
         break;
     case AMDGPU_HW_IP_VCE:
         *out_ring = &adev->vce.ring[ring];
         break;
     case AMDGPU_HW_IP_UVD_ENC:
-        instance = ring / adev->uvd.num_enc_rings;
-        *out_ring =
-        &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+        *out_ring = &adev->uvd.inst[0].ring_enc[ring];
         break;
     case AMDGPU_HW_IP_VCN_DEC:
         *out_ring = &adev->vcn.ring_dec;
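amdgpu_object.h shrinks the per-BO placement array from AMDGPU_GEM_DOMAIN_MAX + 1 entries (the domain flags are a bitmask topping out at 0x20, so that was 33 slots) to a tight AMDGPU_BO_MAX_PLACEMENTS of 3, and amdgpu_bo_placement_from_domain() now BUG()s if the count ever reaches the bound. A standalone model of why a handful of slots suffice, since each requested domain contributes at most one entry; the kernel's BUG_ON is slightly stricter (c must stay below the bound), apparently because no caller combines all three domains at once:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU  0x1u
#define DOMAIN_GTT  0x2u
#define DOMAIN_VRAM 0x4u
#define MAX_PLACEMENTS 3    /* at most VRAM, GTT and CPU */

struct place { uint32_t flags; };

static unsigned build_placements(uint32_t domain, struct place *places)
{
    unsigned c = 0;

    if (domain & DOMAIN_VRAM)
        places[c++].flags = DOMAIN_VRAM;
    if (domain & DOMAIN_GTT)
        places[c++].flags = DOMAIN_GTT;
    if (domain & DOMAIN_CPU)
        places[c++].flags = DOMAIN_CPU;

    assert(c <= MAX_PLACEMENTS);    /* same idea as the new BUG_ON */
    return c;
}

int main(void)
{
    struct place places[MAX_PLACEMENTS];

    printf("%u placements\n", build_placements(DOMAIN_VRAM | DOMAIN_GTT, places));
    return 0;
}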
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76920035eb22..11f262f15200 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -436,7 +436,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
             __entry->total_bo, __entry->total_size)
 );
 
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
             TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
             TP_ARGS(bo, new_placement, old_placement),
             TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..8c4358e36c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -248,7 +248,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
     }
 
     /* Object isn't an AMDGPU object so ignore */
-    if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+    if (!amdgpu_bo_is_amdgpu_bo(bo)) {
         placement->placement = &placements;
         placement->busy_placement = &placements;
         placement->num_placement = 1;
@@ -261,7 +261,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
     case TTM_PL_VRAM:
         if (!adev->mman.buffer_funcs_enabled) {
             /* Move to system memory */
-            amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+            amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
         } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
                    amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -271,7 +271,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
              * BO will be evicted to GTT rather than causing other
              * BOs to be evicted from VRAM
              */
-            amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+            amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                              AMDGPU_GEM_DOMAIN_GTT);
             abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
             abo->placements[0].lpfn = 0;
@@ -279,12 +279,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
             abo->placement.num_busy_placement = 1;
         } else {
             /* Move to GTT memory */
-            amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+            amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
         }
         break;
     case TTM_PL_TT:
     default:
-        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
     }
     *placement = abo->placement;
 }
@@ -1925,8 +1925,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
             return;
         }
     } else {
-        drm_sched_entity_destroy(adev->mman.entity.sched,
-                                 &adev->mman.entity);
+        drm_sched_entity_destroy(&adev->mman.entity);
+        dma_fence_put(man->move);
+        man->move = NULL;
     }
 
     /* this just adjusts TTM size idea, which sets lpfn to the correct value */
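Two things in amdgpu_ttm.c deserve a note. First, the eviction trick preserved across the renames: when a CPU-accessible VRAM BO is evicted and VRAM is not fully CPU-visible, the driver asks for VRAM restricted to the range above the visible window (fpfn = visible_vram_size >> PAGE_SHIFT) as the preferred placement, keeping GTT only as the busy fallback, so the BO is shuffled within VRAM instead of leaving it. Second, disabling the copy engine now also drops the VRAM manager's move fence (dma_fence_put(man->move)), since nothing will signal it anymore. A standalone model of the range restriction, with stand-in types rather than TTM's:

#include <stdint.h>
#include <stdio.h>

struct place { uint64_t fpfn, lpfn; const char *name; };

int main(void)
{
    const uint64_t page_shift = 12;
    uint64_t visible_vram_size = 256ull << 20;    /* e.g. a 256 MiB BAR */

    struct place vram = {
        /* first page *after* the CPU-visible window */
        .fpfn = visible_vram_size >> page_shift,
        .lpfn = 0,    /* 0 means no upper bound */
        .name = "VRAM (invisible range only)",
    };
    struct place gtt = { 0, 0, "GTT (busy fallback only)" };

    printf("preferred: %s, fpfn=%llu\n", vram.name,
           (unsigned long long)vram.fpfn);
    printf("busy:      %s\n", gtt.name);
    return 0;
}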
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..fca86d71fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
     int i, j;
 
-    drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-                             &adev->uvd.entity);
+    drm_sched_entity_destroy(&adev->uvd.entity);
 
     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
         kfree(adev->uvd.inst[j].saved_bo);
@@ -473,7 +472,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
         if (cmd == 0x0 || cmd == 0x3) {
             /* yes, force it into VRAM */
             uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
-            amdgpu_ttm_placement_from_domain(bo, domain);
+            amdgpu_bo_placement_from_domain(bo, domain);
         }
         amdgpu_uvd_force_into_uvd_segment(bo);
 
@@ -1014,7 +1013,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
     if (!ring->adev->uvd.address_64_bit) {
         struct ttm_operation_ctx ctx = { true, false };
 
-        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
         amdgpu_uvd_force_into_uvd_segment(bo);
         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
         if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
     if (adev->vce.vcpu_bo == NULL)
         return 0;
 
-    drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+    drm_sched_entity_destroy(&adev->vce.entity);
 
     amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
         (void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..5d7d7900ccab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         ats_entries = 0;
     }
 
-    ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+    ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
     r = reservation_object_reserve_shared(bo->tbo.resv);
     if (r)
@@ -1113,7 +1113,7 @@ restart:
         struct amdgpu_ring *ring;
         struct dma_fence *fence;
 
-        ring = container_of(vm->entity.sched, struct amdgpu_ring,
+        ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
                             sched);
 
         amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                        addr, flags);
     }
 
-    ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+    ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
     nptes = last - start + 1;
 
@@ -2642,7 +2642,7 @@ error_free_root:
     vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-    drm_sched_entity_destroy(&ring->sched, &vm->entity);
+    drm_sched_entity_destroy(&vm->entity);
 
     return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
     }
 
-    drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+    drm_sched_entity_destroy(&vm->entity);
 
     if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
         dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 702e257a483f..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
             tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
             WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
 
-            mdelay(100);
+            msleep(100);
 
             /* linkctl */
             pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
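The one-liner in cik.c swaps mdelay(100) for msleep(100) in the PCIe gen3 link-retraining wait. mdelay() busy-spins the CPU for the whole interval, while msleep() actually sleeps, which is both kinder to the system and, per the kernel's timers-howto document, the right tool for multi-millisecond waits in process context (busy-waiting remains necessary only in atomic context). Sketch of the rule of thumb:

#include <linux/delay.h>

/* Hedged rule of thumb from Documentation's timers-howto:
 * - atomic context, or waits of a few microseconds: udelay()/mdelay()
 * - process context and roughly >= 10 ms: msleep(), which schedules away
 */
static void wait_for_link_retrain_sketch(void)
{
    /* a long wait in process context: don't monopolize the CPU */
    msleep(100);
}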
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9ab39117cc4e..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3490,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 
         /* wait for RLC_SAFE_MODE */
         for (i = 0; i < adev->usec_timeout; i++) {
-            if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+            if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
                 break;
             udelay(1);
         }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 9df94b45d17d..399a5db27649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -269,7 +269,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
             entry->src_id, entry->ring_id, entry->vmid,
             entry->pasid, task_info.process_name, task_info.tgid,
             task_info.task_name, task_info.pid);
-        dev_err(adev->dev, "  at page 0x%016llx from %d\n",
+        dev_err(adev->dev, "  at address 0x%016llx from %d\n",
             addr, entry->client_id);
         if (!amdgpu_sriov_vf(adev))
             dev_err(adev->dev,
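Finally, the gfx_v9_0.c hunk fixes a subtle polling bug worth spelling out: REG_GET_FIELD() is pure bit arithmetic, so passing it SOC15_REG_OFFSET(...) - the register's address - instead of RREG32_SOC15(...) - the register's value - made the loop test a constant, and since that offset's CMD bit happens to be clear, the "wait for RLC safe mode" loop broke out immediately without ever reading the hardware. A self-contained model; the offset value and field layout here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define CMD_MASK  0x1u
#define CMD_SHIFT 0

/* pure bit extraction, no MMIO involved - just like REG_GET_FIELD() */
static uint32_t reg_get_field(uint32_t val, uint32_t mask, unsigned shift)
{
    return (val & mask) >> shift;
}

/* hypothetical register offset, distinct from the register's contents */
static const uint32_t RLC_SAFE_MODE_OFFSET = 0x4cd0;

/* pretend MMIO read: hardware still has CMD set (handshake not yet done) */
static uint32_t rreg32(uint32_t offset) { (void)offset; return 0x1; }

int main(void)
{
    /* buggy: tests bit 0 of the *offset*; prints 0, so a poll loop would
     * "succeed" instantly without waiting for the hardware at all */
    printf("buggy: %u\n", reg_get_field(RLC_SAFE_MODE_OFFSET, CMD_MASK, CMD_SHIFT));
    /* fixed: tests bit 0 of the *value* read back; prints 1, keep polling */
    printf("fixed: %u\n", reg_get_field(rreg32(RLC_SAFE_MODE_OFFSET), CMD_MASK, CMD_SHIFT));
    return 0;
}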