From 8fdf074f1840eae838bbccbec37d0a1504ee432b Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 6 Jun 2017 17:25:13 +0800 Subject: drm/amdgpu: fix world switch hang MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For SR-IOV, we must keep the pipeline-sync under the protection of COND_EXEC; otherwise the commands consumed by the CPG are not consistent when a world switch triggers. E.g.: a world switch hits and the IB frame is skipped, so the fence won't signal; the CP then jumps to the next DMA frame's pipeline-sync command, which makes the CP hang forever. After the pipeline-sync is moved inside COND_EXEC, consistency is guaranteed. Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 936f158bc5ec..3441ec58c823 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -222,7 +222,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job); -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, unsigned vmid); void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); -- cgit v1.2.3 From 6be7adb37d9bd8af02e53feb72c35e9624165889 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 23 May 2017 18:35:22 +0200 Subject: drm/amdgpu: increase fragmentation size for Vega10 v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fragment bits work differently for Vega10 compared to previous generations. Increase the fragment size to 2MB for now to better handle that.
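For context, the PTE fragment field stores the log2 number of contiguous 4 KB GPU pages covered by one fragment, so a value of 4 yields 64 KB and a value of 9 yields the 2 MB chosen here. A minimal sketch of that arithmetic, with illustrative names rather than the driver's own:

    #include <stdint.h>

    /* Illustrative only: bytes covered by one PTE fragment for a given
     * log2 page count; 4 -> 64 KB (pre-Vega10), 9 -> 2 MB (Vega10). */
    static inline uint64_t frag_size_bytes(unsigned int frag_log2)
    {
            return (UINT64_C(1) << frag_log2) * 4096; /* 4 KB GPU pages */
    }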
v2: handle the hardware setup as well Signed-off-by: Christian König Reviewed-and-tested-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 +++- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 4 +++- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4 +++- 5 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 632bd08610ed..09f833255ba1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -589,8 +589,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); - dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * - AMDGPU_GPU_PAGE_SIZE; + dev_info.pte_fragment_size = + (1 << AMDGPU_LOG2_PAGES_PER_FRAG(adev)) * + AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; dev_info.cu_active_number = adev->gfx.cu_info.number; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b017b54e45ba..088c8504dbe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1381,8 +1381,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, */ /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); - uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev); + uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag); + uint64_t frag_align = 1 << pages_per_frag; uint64_t frag_start = ALIGN(start, frag_align); uint64_t frag_end = end & ~(frag_align - 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 3441ec58c823..c4f5d1ff042e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -51,7 +51,9 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 /* LOG2 number of continuous pages for the fragment field */ -#define AMDGPU_LOG2_PAGES_PER_FRAG 4 +#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \ + ((adev)->asic_type < CHIP_VEGA10 ? 
4 : \ + (adev)->vm_manager.block_size) #define AMDGPU_PTE_VALID (1ULL << 0) #define AMDGPU_PTE_SYSTEM (1ULL << 1) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 008ad3dc4afd..408723ef157c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -129,7 +129,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); @@ -144,6 +144,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); tmp = mmVM_L2_CNTL3_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 96f1628541bb..ad8def3cc343 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -143,7 +143,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); @@ -158,6 +158,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); tmp = mmVM_L2_CNTL3_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; -- cgit v1.2.3 From cf2f0a372049451eb824982b7fb26b1a15097821 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 25 Jul 2017 16:35:38 -0400 Subject: drm/amdgpu: enable huge page handling in the VM v5 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The hardware can use huge pages to map 2MB of address space with only one PDE. 
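To see where the 2 MB figure comes from: with the 9-bit block size used on Vega10, one page table holds 512 PTEs, and 512 x 4 KB = 2 MB, so a PDE covering a fully populated contiguous range can carry the address itself instead of pointing at a page table. A hedged sketch of the core condition, with illustrative names (the actual test in this patch also checks the ASIC type, the update function, and the valid flag):

    /* Illustrative only: a PDE can act as a 2 MB huge-page PTE when one
     * update covers every entry of a page table (512 entries with a
     * 9-bit block size) and the resulting mapping is valid. */
    #define EXAMPLE_PTES_PER_PT     (1u << 9)       /* 512 * 4 KB = 2 MB */

    static int example_can_use_huge_page(unsigned int nptes, int mapping_valid)
    {
            return nptes == EXAMPLE_PTES_PER_PT && mapping_valid;
    }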
v2: few cleanups and rebased v3: skip PT updates if we are using the PDE v4: rebased, added support for CPU based updates v5: fix CPU based updates once more v6: fix ndw estimation Signed-off-by: Christian König Reviewed-and-tested-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 120 +++++++++++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 ++ 2 files changed, 104 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 088c8504dbe7..250c8e80e646 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -352,6 +352,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, entry->bo = pt; entry->addr = 0; + entry->huge_page = false; } if (level < adev->vm_manager.num_level) { @@ -1117,7 +1118,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); - if (parent->entries[pt_idx].addr == pt) + if (parent->entries[pt_idx].addr == pt || + parent->entries[pt_idx].huge_page) continue; parent->entries[pt_idx].addr = pt; @@ -1258,29 +1260,95 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, } /** - * amdgpu_vm_find_pt - find the page table for an address + * amdgpu_vm_find_entry - find the entry for an address * * @p: see amdgpu_pte_update_params definition * @addr: virtual address in question + * @entry: resulting entry or NULL + * @parent: parent entry * - * Find the page table BO for a virtual address, return NULL when none found. + * Find the vm_pt entry and it's parent for the given address. */ -static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, - uint64_t addr) +void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, + struct amdgpu_vm_pt **entry, + struct amdgpu_vm_pt **parent) { - struct amdgpu_vm_pt *entry = &p->vm->root; unsigned idx, level = p->adev->vm_manager.num_level; - while (entry->entries) { + *parent = NULL; + *entry = &p->vm->root; + while ((*entry)->entries) { idx = addr >> (p->adev->vm_manager.block_size * level--); - idx %= amdgpu_bo_size(entry->bo) / 8; - entry = &entry->entries[idx]; + idx %= amdgpu_bo_size((*entry)->bo) / 8; + *parent = *entry; + *entry = &(*entry)->entries[idx]; } if (level) - return NULL; + *entry = NULL; +} + +/** + * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages + * + * @p: see amdgpu_pte_update_params definition + * @entry: vm_pt entry to check + * @parent: parent entry + * @nptes: number of PTEs updated with this operation + * @dst: destination address where the PTEs should point to + * @flags: access flags fro the PTEs + * + * Check if we can update the PD with a huge page. 
+ */ +static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, + struct amdgpu_vm_pt *entry, + struct amdgpu_vm_pt *parent, + unsigned nptes, uint64_t dst, + uint64_t flags) +{ + bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes); + uint64_t pd_addr, pde; + int r; + + /* In the case of a mixed PT the PDE must point to it*/ + if (p->adev->asic_type < CHIP_VEGA10 || + nptes != AMDGPU_VM_PTE_COUNT(p->adev) || + p->func == amdgpu_vm_do_copy_ptes || + !(flags & AMDGPU_PTE_VALID)) { + + dst = amdgpu_bo_gpu_offset(entry->bo); + dst = amdgpu_gart_get_vm_pde(p->adev, dst); + flags = AMDGPU_PTE_VALID; + } else { + flags |= AMDGPU_PDE_PTE; + } + + if (entry->addr == dst && + entry->huge_page == !!(flags & AMDGPU_PDE_PTE)) + return 0; + + entry->addr = dst; + entry->huge_page = !!(flags & AMDGPU_PDE_PTE); + + if (use_cpu_update) { + r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr); + if (r) + return r; + + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + } else { + if (parent->bo->shadow) { + pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); + } + pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); + } - return entry->bo; + return 0; } /** @@ -1307,21 +1375,31 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_bo *pt; unsigned nptes; bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes); - + int r; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; addr += nptes) { - pt = amdgpu_vm_get_pt(params, addr); - if (!pt) { - pr_err("PT not found, aborting update_ptes\n"); - return -EINVAL; - } + for (addr = start; addr < end; addr += nptes, + dst += nptes * AMDGPU_GPU_PAGE_SIZE) { + struct amdgpu_vm_pt *entry, *parent; + + amdgpu_vm_get_entry(params, addr, &entry, &parent); + if (!entry) + return -ENOENT; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); + r = amdgpu_vm_handle_huge_pages(params, entry, parent, + nptes, dst, flags); + if (r) + return r; + + if (entry->huge_page) + continue; + + pt = entry->bo; if (use_cpu_update) { pe_start = (unsigned long)pt->kptr; } else { @@ -1337,8 +1415,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, pe_start += (addr & mask) * 8; params->func(params, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); - - dst += nptes * AMDGPU_GPU_PAGE_SIZE; } return 0; @@ -1490,6 +1566,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. 
*/ ndw = 64; + /* one PDE write for each huge page */ + ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6; + if (src) { /* only copy commands needed */ ndw += ncmds * 7; @@ -1569,6 +1648,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, error_free: amdgpu_job_free(job); + amdgpu_vm_invalidate_level(&vm->root); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index c4f5d1ff042e..34d9174ebff2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -70,6 +70,9 @@ struct amdgpu_bo_list_entry; /* TILED for VEGA10, reserved for older ASICs */ #define AMDGPU_PTE_PRT (1ULL << 51) +/* PDE is handled as PTE for VEGA10 */ +#define AMDGPU_PDE_PTE (1ULL << 54) + /* VEGA10 only */ #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) @@ -100,6 +103,7 @@ struct amdgpu_bo_list_entry; struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; + bool huge_page; /* array of page tables, one for each directory entry */ struct amdgpu_vm_pt *entries; -- cgit v1.2.3 From 51ac7eec620b8ec705955ad2c845a5b5fed6b40f Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Thu, 27 Jul 2017 12:48:22 -0400 Subject: drm/amdgpu: Support IOMMU on Raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We achieve this by setting the S (SYSTEM) and P (PDE-as-PTE) bits to 1 for PDEs, and the S bit to 1 for PTEs, when the corresponding addresses are not occupied by GPU-driver-allocated buffers. Signed-off-by: Yong Zhao Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 29 ++++++++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++ 2 files changed, 27 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 601e899005d0..9ce36652029e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -288,6 +288,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, unsigned pt_idx, from, to; int r; u64 flags; + uint64_t init_value = 0; if (!parent->entries) { unsigned num_entries = amdgpu_vm_num_entries(adev, level); @@ -321,6 +322,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_SHADOW); + if (vm->pte_support_ats) { + init_value = AMDGPU_PTE_SYSTEM; + if (level != adev->vm_manager.num_level - 1) + init_value |= AMDGPU_PDE_PTE; + } + /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { struct reservation_object *resv = vm->root.bo->tbo.resv; @@ -333,7 +340,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, resv, 0, &pt); + NULL, resv, init_value, &pt); if (r) return r; @@ -1995,15 +2002,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct dma_fence *f = NULL; int r; + uint64_t init_pte_value = 0; while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); + if (vm->pte_support_ats) + init_pte_value = AMDGPU_PTE_SYSTEM; + r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, mapping->start, mapping->last, - 0, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2494,6 +2505,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amd_sched_rq *rq; int r, i; u64 flags; + uint64_t init_pde_value = 0; vm->va = RB_ROOT; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); @@ -2515,10 +2527,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) + vm->pte_support_ats = false; + + if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - else + + if (adev->asic_type == CHIP_RAVEN) { + vm->pte_support_ats = true; + init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; + } + } else vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); DRM_DEBUG_DRIVER("VM update mode is %s\n", @@ -2538,7 +2557,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, NULL, 0, &vm->root.bo); + NULL, NULL, init_pde_value, &vm->root.bo); if (r) goto error_free_sched_entity; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 34d9174ebff2..217ecba8f4cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -146,6 +146,9 @@ struct amdgpu_vm { /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; + + /* Flag to indicate ATS support from PTE for GFX9 */ + bool pte_support_ats; }; struct amdgpu_vm_id { -- cgit v1.2.3 From b636922553ee2c47b9e3955c5665b8996dfcdbd7 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 3 Aug 2017 11:44:01 -0400 Subject: drm/amdgpu: only move VM BOs in the LRU during validation v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should save us a bunch of command submission overhead. v2: move the LRU move to the right place to avoid the move for the root BO and handle the shadow BOs as well. This turned out to be a bug fix because the move needs to happen before the kmap. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 15 +++------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 58 +++++++--------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 -- 3 files changed, 16 insertions(+), 59 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c05479ec825a..825784b3b193 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -673,10 +673,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) { - amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); + if (r) ttm_eu_backoff_reservation(&p->ticket, &p->validated); - } error_free_pages: @@ -724,21 +722,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. 
**/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, + bool backoff) { - struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; unsigned i; - if (!error) { - amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); - + if (!error) ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, parser->fence); - } else if (backoff) { + else if (backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - } for (i = 0; i < parser->num_post_dep_syncobjs; i++) drm_syncobj_put(parser->post_dep_syncobjs[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9ce36652029e..ff8ab2074a59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -159,7 +159,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, */ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, int (*validate)(void *, struct amdgpu_bo *), - void *param, bool use_cpu_for_update) + void *param, bool use_cpu_for_update, + struct ttm_bo_global *glob) { unsigned i; int r; @@ -183,12 +184,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, if (r) return r; + spin_lock(&glob->lru_lock); + ttm_bo_move_to_lru_tail(&entry->bo->tbo); + if (entry->bo->shadow) + ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo); + spin_unlock(&glob->lru_lock); + /* * Recurse into the sub directory. This is harmless because we * have only a maximum of 5 layers. */ r = amdgpu_vm_validate_level(entry, validate, param, - use_cpu_for_update); + use_cpu_for_update, glob); if (r) return r; } @@ -220,54 +227,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, return 0; return amdgpu_vm_validate_level(&vm->root, validate, param, - vm->use_cpu_for_update); + vm->use_cpu_for_update, + adev->mman.bdev.glob); } /** - * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail - * - * @adev: amdgpu device instance - * @vm: vm providing the BOs - * - * Move the PT BOs to the tail of the LRU. - */ -static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent) -{ - unsigned i; - - if (!parent->entries) - return; - - for (i = 0; i <= parent->last_entry_used; ++i) { - struct amdgpu_vm_pt *entry = &parent->entries[i]; - - if (!entry->bo) - continue; - - ttm_bo_move_to_lru_tail(&entry->bo->tbo); - amdgpu_vm_move_level_in_lru(entry); - } -} - -/** - * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail - * - * @adev: amdgpu device instance - * @vm: vm providing the BOs - * - * Move the PT BOs to the tail of the LRU. 
- */ -void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, - struct amdgpu_vm *vm) -{ - struct ttm_bo_global *glob = adev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - amdgpu_vm_move_level_in_lru(&vm->root); - spin_unlock(&glob->lru_lock); -} - - /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 217ecba8f4cc..6e94cd2e610c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -223,8 +223,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); -void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, - struct amdgpu_vm *vm); int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size); -- cgit v1.2.3 From 0f4b3c68626199cd5ce619e2a3105d44b81f2753 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 31 Jul 2017 15:32:40 +0200 Subject: drm/amdgpu: cleanup static CSA handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the CSA bo_va from the VM to the fpriv structure. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 25 ++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 -- 6 files changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ad944aea0d4b..1f915a5ce9ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -748,6 +748,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); struct amdgpu_fpriv { struct amdgpu_vm vm; struct amdgpu_bo_va *prt_va; + struct amdgpu_bo_va *csa_va; struct mutex bo_list_lock; struct idr bo_list_handles; struct amdgpu_ctx_mgr ctx_mgr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 7e71a511990e..3c64248673ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -787,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (amdgpu_sriov_vf(adev)) { struct dma_fence *f; - bo_va = vm->csa_bo_va; + + bo_va = fpriv->csa_va; BUG_ON(!bo_va); r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 29cd5dabf8b5..1aac5821ac8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -843,7 +843,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) } if (amdgpu_sriov_vf(adev)) { - r = amdgpu_map_static_csa(adev, &fpriv->vm); + r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); if (r) goto out_suspend; } @@ -896,8 +896,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, if (amdgpu_sriov_vf(adev)) { /* TODO: how to handle reserve failure */ BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true)); - amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va); - fpriv->vm.csa_bo_va = NULL; + 
amdgpu_vm_bo_rmv(adev, fpriv->csa_va); + fpriv->csa_va = NULL; amdgpu_bo_unreserve(adev->virt.csa_obj); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 8a081e162d13..89208456d360 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev) * address within META_DATA init package to support SRIOV gfx preemption. */ -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va) { - int r; - struct amdgpu_bo_va *bo_va; struct ww_acquire_ctx ticket; struct list_head list; struct amdgpu_bo_list_entry pd; struct ttm_validate_buffer csa_tv; + int r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&csa_tv.head); @@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) return r; } - bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); - if (!bo_va) { + *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); + if (!*bo_va) { ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("failed to create bo_va for static CSA\n"); return -ENOMEM; } - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR, - AMDGPU_CSA_SIZE); + r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR, + AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, bo_va); + amdgpu_vm_bo_rmv(adev, *bo_va); ttm_eu_backoff_reservation(&ticket, &list); return r; } - r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, bo_va); + amdgpu_vm_bo_rmv(adev, *bo_va); ttm_eu_backoff_reservation(&ticket, &list); return r; } - vm->csa_bo_va = bo_va; ttm_eu_backoff_reservation(&ticket, &list); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index e5b1baf387c1..afcfb8bcfb65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -90,7 +90,8 @@ static inline bool is_virtual_machine(void) struct amdgpu_vm; int amdgpu_allocate_static_csa(struct amdgpu_device *adev); -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6e94cd2e610c..9c309c5a86f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -141,8 +141,6 @@ struct amdgpu_vm { u64 client_id; /* dedicated to vm */ struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; - /* each VM will map on CSA */ - struct amdgpu_bo_va *csa_bo_va; /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; -- cgit v1.2.3 From 4ab4016aaf82153d144fa678cd6b4b5b6f25ed70 Mon 
Sep 17 00:00:00 2001 From: Christian König Date: Thu, 3 Aug 2017 20:30:50 +0200 Subject: drm/amdgpu: drop the extra VM huge page flag v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just add the flags to the addr field as well. v2: add some more comments that the flag is for huge pages. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 18 +++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 - 2 files changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 420026bc2514..14012e80fa27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -331,7 +331,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, entry->bo = pt; entry->addr = 0; - entry->huge_page = false; } if (level < adev->vm_manager.num_level) { @@ -1083,11 +1082,12 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); - if (parent->entries[pt_idx].addr == pt || - parent->entries[pt_idx].huge_page) + /* Don't update huge pages here */ + if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) || + parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID)) continue; - parent->entries[pt_idx].addr = pt; + parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID; pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || @@ -1284,15 +1284,14 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, dst = amdgpu_gart_get_vm_pde(p->adev, dst); flags = AMDGPU_PTE_VALID; } else { + /* Set the huge page flag to stop scanning at this PDE */ flags |= AMDGPU_PDE_PTE; } - if (entry->addr == dst && - entry->huge_page == !!(flags & AMDGPU_PDE_PTE)) + if (entry->addr == (dst | flags)) return; - entry->addr = dst; - entry->huge_page = !!(flags & AMDGPU_PDE_PTE); + entry->addr = (dst | flags); if (use_cpu_update) { pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); @@ -1351,7 +1350,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, amdgpu_vm_handle_huge_pages(params, entry, parent, nptes, dst, flags); - if (entry->huge_page) + /* We don't need to update PTEs for huge pages */ + if (entry->addr & AMDGPU_PDE_PTE) continue; pt = entry->bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9c309c5a86f1..f12c12fec3c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -103,7 +103,6 @@ struct amdgpu_bo_list_entry; struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; - bool huge_page; /* array of page tables, one for each directory entry */ struct amdgpu_vm_pt *entries; -- cgit v1.2.3 From ec681545afe5a448b43a2fe5c206ee48e19dabb3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 1 Aug 2017 10:51:43 +0200 Subject: drm/amdgpu: separate bo_va structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split that into vm_bo_base and bo_va to allow other uses as well. 
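The split follows the usual kernel embedding pattern: the tracking state shared by every VM user of a BO lives in a base struct embedded in each user, and code that only holds a base pointer (for example the VM status lists) recovers the containing object with container_of(). A simplified sketch with illustrative type names, not the driver's exact definitions:

    #include <linux/kernel.h>
    #include <linux/list.h>

    struct amdgpu_vm;
    struct amdgpu_bo;

    struct example_vm_bo_base {
            struct amdgpu_vm *vm;           /* constant after init */
            struct amdgpu_bo *bo;
            struct list_head bo_list;       /* protected by bo reservation */
            struct list_head vm_status;     /* protected by the status lock */
    };

    struct example_bo_va {
            struct example_vm_bo_base base; /* shared tracking state */
            unsigned int ref_count;         /* bo_va-specific state */
    };

    /* Recover the bo_va from a base pointer taken off a status list. */
    #define base_to_bo_va(b) container_of(b, struct example_bo_va, base)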
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 14 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 81 ++++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 12 +++++ 7 files changed, 66 insertions(+), 55 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3c64248673ee..75e7141c8de4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1487,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, addr > mapping->last) continue; - *bo = lobj->bo_va->bo; + *bo = lobj->bo_va->base.bo; return mapping; } @@ -1496,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, addr > mapping->last) continue; - *bo = lobj->bo_va->bo; + *bo = lobj->bo_va->base.bo; return mapping; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 5ae9941bad7c..7171968f261e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -621,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address, + r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address, args->map_size); if (r) goto error_backoff; @@ -641,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case AMDGPU_VA_OP_REPLACE: - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address, + r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address, args->map_size); if (r) goto error_backoff; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 9b7b4fcb047b..a288fa6d72c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -33,6 +33,7 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX +/* bo virtual addresses in a vm */ struct amdgpu_bo_va_mapping { struct list_head list; struct rb_node rb; @@ -43,26 +44,19 @@ struct amdgpu_bo_va_mapping { uint64_t flags; }; -/* bo virtual addresses in a specific vm */ +/* User space allocated BO in a VM */ struct amdgpu_bo_va { + struct amdgpu_vm_bo_base base; + /* protected by bo being reserved */ - struct list_head bo_list; struct dma_fence *last_pt_update; unsigned ref_count; - /* protected by vm mutex and spinlock */ - struct list_head vm_status; - /* mappings for this bo_va */ struct list_head invalids; struct list_head valids; - - /* constant after initialization */ - struct amdgpu_vm *vm; - struct amdgpu_bo *bo; }; - struct amdgpu_bo { /* Protected by tbo.reserved */ u32 preferred_domains; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index d8cd3e554488..1c88bd5e29ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -284,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map, ), TP_fast_assign( - __entry->bo = bo_va ? bo_va->bo : NULL; + __entry->bo = bo_va ? 
bo_va->base.bo : NULL; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; @@ -308,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, ), TP_fast_assign( - __entry->bo = bo_va->bo; + __entry->bo = bo_va->base.bo; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 89208456d360..ab05121b9272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -76,7 +76,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, return -ENOMEM; } - r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR, + r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR, AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 14012e80fa27..f24554f2d0e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -870,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, { struct amdgpu_bo_va *bo_va; - list_for_each_entry(bo_va, &bo->va, bo_list) { - if (bo_va->vm == vm) { + list_for_each_entry(bo_va, &bo->va, base.bo_list) { + if (bo_va->base.vm == vm) { return bo_va; } } @@ -1726,7 +1726,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint64_t gtt_flags, flags; @@ -1735,27 +1736,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct dma_fence *exclusive; int r; - if (clear || !bo_va->bo) { + if (clear || !bo_va->base.bo) { mem = NULL; nodes = NULL; exclusive = NULL; } else { struct ttm_dma_tt *ttm; - mem = &bo_va->bo->tbo.mem; + mem = &bo_va->base.bo->tbo.mem; nodes = mem->mm_node; if (mem->mem_type == TTM_PL_TT) { - ttm = container_of(bo_va->bo->tbo.ttm, struct - ttm_dma_tt, ttm); + ttm = container_of(bo_va->base.bo->tbo.ttm, + struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); + exclusive = reservation_object_get_excl(bo->tbo.resv); } - if (bo_va->bo) { - flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? + if (bo) { + flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); + gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && + adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? 
flags : 0; } else { flags = 0x0; @@ -1763,7 +1764,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->vm_status)) + if (!list_empty(&bo_va->base.vm_status)) list_splice_init(&bo_va->valids, &bo_va->invalids); spin_unlock(&vm->status_lock); @@ -1786,9 +1787,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); - list_del_init(&bo_va->vm_status); + list_del_init(&bo_va->base.vm_status); if (clear) - list_add(&bo_va->vm_status, &vm->cleared); + list_add(&bo_va->base.vm_status, &vm->cleared); spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { @@ -2001,7 +2002,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, spin_lock(&vm->status_lock); while (!list_empty(&vm->invalidated)) { bo_va = list_first_entry(&vm->invalidated, - struct amdgpu_bo_va, vm_status); + struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); r = amdgpu_vm_bo_update(adev, bo_va, true); @@ -2041,16 +2042,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, if (bo_va == NULL) { return NULL; } - bo_va->vm = vm; - bo_va->bo = bo; + bo_va->base.vm = vm; + bo_va->base.bo = bo; + INIT_LIST_HEAD(&bo_va->base.bo_list); + INIT_LIST_HEAD(&bo_va->base.vm_status); + bo_va->ref_count = 1; - INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - INIT_LIST_HEAD(&bo_va->vm_status); if (bo) - list_add_tail(&bo_va->bo_list, &bo->va); + list_add_tail(&bo_va->base.bo_list, &bo->va); return bo_va; } @@ -2075,7 +2077,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping, *tmp; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; /* validate the parameters */ @@ -2086,7 +2089,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2096,7 +2099,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (tmp) { /* bo and tmp overlap, invalid addr */ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " - "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr, + "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, tmp->start, tmp->last + 1); return -EINVAL; } @@ -2141,7 +2144,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; int r; @@ -2153,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; /* Allocate all the needed memory */ @@ -2161,7 +2165,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, if (!mapping) return -ENOMEM; - r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size); + r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); if (r) { kfree(mapping); return r; @@ -2201,7 +2205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, uint64_t saddr) { struct 
amdgpu_bo_va_mapping *mapping; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2349,12 +2353,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va) { struct amdgpu_bo_va_mapping *mapping, *next; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_vm *vm = bo_va->base.vm; - list_del(&bo_va->bo_list); + list_del(&bo_va->base.bo_list); spin_lock(&vm->status_lock); - list_del(&bo_va->vm_status); + list_del(&bo_va->base.vm_status); spin_unlock(&vm->status_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { @@ -2386,13 +2390,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo) { - struct amdgpu_bo_va *bo_va; - - list_for_each_entry(bo_va, &bo->va, bo_list) { - spin_lock(&bo_va->vm->status_lock); - if (list_empty(&bo_va->vm_status)) - list_add(&bo_va->vm_status, &bo_va->vm->invalidated); - spin_unlock(&bo_va->vm->status_lock); + struct amdgpu_vm_bo_base *bo_base; + + list_for_each_entry(bo_base, &bo->va, bo_list) { + spin_lock(&bo_base->vm->status_lock); + if (list_empty(&bo_base->vm_status)) + list_add(&bo_base->vm_status, + &bo_base->vm->invalidated); + spin_unlock(&bo_base->vm->status_lock); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index f12c12fec3c0..95e5e81e1026 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -99,6 +99,18 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) +/* base structure for tracking BO usage in a VM */ +struct amdgpu_vm_bo_base { + /* constant after initialization */ + struct amdgpu_vm *vm; + struct amdgpu_bo *bo; + + /* protected by bo being reserved */ + struct list_head bo_list; + + /* protected by spinlock */ + struct list_head vm_status; +}; struct amdgpu_vm_pt { struct amdgpu_bo *bo; -- cgit v1.2.3 From 27c7b9aeecd7c06a3b527795807c19a0bbe25c1e Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 1 Aug 2017 11:27:36 +0200 Subject: drm/amdgpu: rename VM invalidated to moved MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That better describes what happens here with the BO. 
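The moved list is drained with a standard pattern, visible in the diff below: pop one entry at a time under the status spinlock, then drop the lock while the potentially sleeping page-table update runs. A simplified sketch with illustrative types, not the driver's exact code:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_vm {
            spinlock_t status_lock;
            struct list_head moved; /* BOs moved, PTs not yet updated */
    };

    static int example_clear_moved(struct example_vm *vm,
                                   int (*update)(struct list_head *entry))
    {
            int r = 0;

            spin_lock(&vm->status_lock);
            while (!list_empty(&vm->moved)) {
                    struct list_head *entry = vm->moved.next;

                    spin_unlock(&vm->status_lock);
                    /* may sleep; must unlink entry on success */
                    r = update(entry);
                    if (r)
                            return r;
                    spin_lock(&vm->status_lock);
            }
            spin_unlock(&vm->status_lock);

            return r;
    }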
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 75e7141c8de4..15d4a28d73bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -825,7 +825,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync); + r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f24554f2d0e5..2ed99b8f7da7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1983,25 +1983,25 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, } /** - * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT + * amdgpu_vm_clear_moved - clear moved BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm * - * Make sure all invalidated BOs are cleared in the PT. + * Make sure all moved BOs are cleared in the PT. * Returns 0 for success. * * PTs have to be reserved and mutex must be locked! */ -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, - struct amdgpu_vm *vm, struct amdgpu_sync *sync) +int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = NULL; int r = 0; spin_lock(&vm->status_lock); - while (!list_empty(&vm->invalidated)) { - bo_va = list_first_entry(&vm->invalidated, + while (!list_empty(&vm->moved)) { + bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); @@ -2396,7 +2396,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, spin_lock(&bo_base->vm->status_lock); if (list_empty(&bo_base->vm_status)) list_add(&bo_base->vm_status, - &bo_base->vm->invalidated); + &bo_base->vm->moved); spin_unlock(&bo_base->vm->status_lock); } } @@ -2465,7 +2465,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); - INIT_LIST_HEAD(&vm->invalidated); + INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 95e5e81e1026..a740b57e9eee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -129,7 +129,7 @@ struct amdgpu_vm { spinlock_t status_lock; /* BOs moved, but not yet updated in the PT */ - struct list_head invalidated; + struct list_head moved; /* BOs cleared in the PT because of a move */ struct list_head cleared; @@ -247,8 +247,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync); +int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct 
amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); -- cgit v1.2.3 From e618d306ded38dc9d37c04dc37e24bf9d62e9c7b Mon Sep 17 00:00:00 2001 From: Roger He Date: Fri, 11 Aug 2017 20:00:41 +0800 Subject: drm/amd/amdgpu: store fragment_size in vm_manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds fragment_size to the vm_manager structure and implements the hardware setup for it. Reviewed-by: Christian König Signed-off-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 +----- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 8 ++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 ++++++--- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9 ++++++--- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 ++++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 5 +++-- 9 files changed, 33 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1aac5821ac8f..e16229000a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -590,11 +590,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); - dev_info.pte_fragment_size = - (1 << AMDGPU_LOG2_PAGES_PER_FRAG(adev)) * - AMDGPU_GPU_PAGE_SIZE; + dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; - dev_info.cu_active_number = adev->gfx.cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2ed99b8f7da7..efac05d489c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1410,9 +1410,7 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * Userspace can support this by aligning virtual base address and * allocation size to the fragment size. */ - - /* SI and newer are optimized for 64KB */ - unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev); + unsigned pages_per_frag = params->adev->vm_manager.fragment_size; uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag); uint64_t frag_align = 1 << pages_per_frag; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index a740b57e9eee..45a276960d02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -50,11 +50,6 @@ struct amdgpu_bo_list_entry; /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 -/* LOG2 number of continuous pages for the fragment field */ -#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \ - ((adev)->asic_type < CHIP_VEGA10 ? 
4 : \ - (adev)->vm_manager.block_size) - #define AMDGPU_PTE_VALID (1ULL << 0) #define AMDGPU_PTE_SYSTEM (1ULL << 1) #define AMDGPU_PTE_SNOOPED (1ULL << 2) @@ -200,6 +195,7 @@ struct amdgpu_vm_manager { uint32_t num_level; uint64_t vm_size; uint32_t block_size; + uint32_t fragment_size; /* vram base address for page table entry */ u64 vram_base_offset; /* vm pte handling */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 6c8040e616c4..4f2788b61a08 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, field; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); @@ -143,8 +143,9 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); + field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 93c45f26b7c8..2db5c71d696c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -461,6 +461,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { int r, i; + u32 field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -488,10 +489,12 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL2, VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK | VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK); + + field = adev->vm_manager.fragment_size; WREG32(mmVM_L2_CNTL3, VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK | - (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) | - (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); + (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) | + (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); @@ -812,6 +815,7 @@ static int gmc_v6_0_sw_init(void *handle) return r; amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.fragment_size = 4; adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; adev->mc.mc_mask = 0xffffffffffULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 4a9e84062874..8ffdad954a4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -562,7 +562,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) { int r, i; - u32 tmp; + u32 tmp, field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -592,10 +592,12 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32(mmVM_L2_CNTL2, tmp); + + field = adev->vm_manager.fragment_size; tmp = 
RREG32(mmVM_L2_CNTL3); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); @@ -949,6 +951,7 @@ static int gmc_v7_0_sw_init(void *handle) * Max GPUVM size for cayman and SI is 40 bits. */ amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.fragment_size = 4; adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 85c937b5e40b..a13f6617de79 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -762,7 +762,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) { int r, i; - u32 tmp; + u32 tmp, field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -793,10 +793,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32(mmVM_L2_CNTL2, tmp); + + field = adev->vm_manager.fragment_size; tmp = RREG32(mmVM_L2_CNTL3); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); WREG32(mmVM_L2_CNTL3, tmp); /* XXX: set to enable PTE/PDE in system memory */ tmp = RREG32(mmVM_L2_CNTL4); @@ -1047,6 +1049,7 @@ static int gmc_v8_0_sw_init(void *handle) * Max GPUVM size for cayman and SI is 40 bits. 
*/ amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.fragment_size = 4; adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c22899a08106..f721b4f4373e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -541,10 +541,12 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; adev->vm_manager.num_level = 3; + adev->vm_manager.fragment_size = 9; } else { /* vm_size is 64GB for legacy 2-level page support*/ amdgpu_vm_adjust_size(adev, 64); adev->vm_manager.num_level = 1; + adev->vm_manager.fragment_size = 9; } break; case CHIP_VEGA10: @@ -558,14 +560,16 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; adev->vm_manager.num_level = 3; + adev->vm_manager.fragment_size = 9; break; default: break; } - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n", adev->vm_manager.vm_size, - adev->vm_manager.block_size); + adev->vm_manager.block_size, + adev->vm_manager.fragment_size); /* This interrupt is VMC page fault.*/ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 74cb647da30e..4395a4f12149 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, field; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); @@ -157,8 +157,9 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); + field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); -- cgit v1.2.3 From d07f14be4d11cf323977672342fb0fc6017052f6 Mon Sep 17 00:00:00 2001 From: Roger He Date: Tue, 15 Aug 2017 16:05:59 +0800 Subject: drm/amd/amdgpu: expose fragment size as module parameter (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow overrides on the command line. 
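For reference, the parameter is the log2 of the number of 4K GPU pages per fragment, so the fragment size in bytes is AMDGPU_GPU_PAGE_SIZE shifted left by the parameter value. A small stand-alone sketch (illustrative only, not part of the patch) reproduces the mapping spelled out in the parameter description below:

#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096ULL /* matches the driver's definition */

int main(void)
{
	/* valid range enforced by amdgpu_check_arguments() below is 4..9 */
	for (unsigned int frag = 4; frag <= 9; frag++)
		printf("vm_fragment_size=%u -> %llu KiB\n",
		       frag, (AMDGPU_GPU_PAGE_SIZE << frag) / 1024);
	return 0;
}

This prints 64 KiB for 4 up to 2048 KiB (2M) for 9. The override itself is a module option, e.g. amdgpu.vm_fragment_size=9 on the kernel command line, or modprobe amdgpu vm_fragment_size=9.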
v2: agd: squash in spelling fix and bogus default value warning Reviewed-by: Christian König Signed-off-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 25 +++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9 ++++----- 9 files changed, 44 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 1f915a5ce9ba..12e71bbfd222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -96,6 +96,7 @@ extern int amdgpu_bapm; extern int amdgpu_deep_color; extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; +extern int amdgpu_vm_fragment_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_vm_update_mode; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a6f6cb0f2e02..1a459ac63df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1076,6 +1076,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_gtt_size = -1; } + /* valid range is between 4 and 9 inclusive */ + if (amdgpu_vm_fragment_size != -1 && + (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { + dev_warn(adev->dev, "valid range is between 4 and 9\n"); + amdgpu_vm_fragment_size = -1; + } + amdgpu_check_vm_size(adev); amdgpu_check_block_size(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 5e9ce8a29669..353e3467e5c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -94,6 +94,7 @@ unsigned amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; int amdgpu_vm_size = -1; +int amdgpu_vm_fragment_size = -1; int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; @@ -183,6 +184,9 @@ module_param_named(deep_color, amdgpu_deep_color, int, 0444); MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)"); module_param_named(vm_size, amdgpu_vm_size, int, 0444); +MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc.
4 = 64K (default), Max 9 = 2M)"); +module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444); + MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index efac05d489c9..6b1343e5541d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2413,12 +2413,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) } /** - * amdgpu_vm_adjust_size - adjust vm size and block size + * amdgpu_vm_set_fragment_size - adjust fragment size in PTE + * + * @adev: amdgpu_device pointer + * @fragment_size_default: the default fragment size if it's set auto + */ +void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default) +{ + if (amdgpu_vm_fragment_size == -1) + adev->vm_manager.fragment_size = fragment_size_default; + else + adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; +} + +/** + * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer * @vm_size: the default vm size if it's set auto */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default) { /* adjust vm size firstly */ if (amdgpu_vm_size == -1) @@ -2433,8 +2447,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) else adev->vm_manager.block_size = amdgpu_vm_block_size; - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", - adev->vm_manager.vm_size, adev->vm_manager.block_size); + amdgpu_vm_set_fragment_size(adev, fragment_size_default); + + DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n", + adev->vm_manager.vm_size, adev->vm_manager.block_size, + adev->vm_manager.fragment_size); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 45a276960d02..ba6691b58ee7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -271,7 +271,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, uint64_t saddr, uint64_t size); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); +void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, + uint32_t fragment_size_default); +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, + uint32_t fragment_size_default); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 2db5c71d696c..12b0c4cd7a5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -814,8 +814,7 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - amdgpu_vm_adjust_size(adev, 64); - adev->vm_manager.fragment_size = 4; + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; adev->mc.mc_mask = 0xffffffffffULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 8ffdad954a4a..e42c1ad3af5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -950,8 +950,7 @@ static int gmc_v7_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64); - adev->vm_manager.fragment_size = 4; + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index a13f6617de79..7ca2dae8237a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1048,8 +1048,7 @@ static int gmc_v8_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64); - adev->vm_manager.fragment_size = 4; + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f721b4f4373e..2769c2b3b56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -541,12 +541,11 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; adev->vm_manager.num_level = 3; - adev->vm_manager.fragment_size = 9; + amdgpu_vm_set_fragment_size(adev, 9); } else { - /* vm_size is 64GB for legacy 2-level page support*/ - amdgpu_vm_adjust_size(adev, 64); + /* vm_size is 64GB for legacy 2-level page support */ + amdgpu_vm_adjust_size(adev, 64, 9); adev->vm_manager.num_level = 1; - adev->vm_manager.fragment_size = 9; } break; case CHIP_VEGA10: @@ -560,7 +559,7 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; adev->vm_manager.num_level = 3; - adev->vm_manager.fragment_size = 9; + amdgpu_vm_set_fragment_size(adev, 9); break; default: break; -- cgit v1.2.3 From 34d7be5dc28041b95254da517fd0f0f740544ece Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 24 Aug 2017 12:32:55 +0200 Subject: drm/amdgpu: fix and cleanup VM ready check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop checking the mapped BO itself, because that one is certainly not a page table.
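A minimal sketch of the resulting call-site pattern (illustrative only; the real hunks follow in the diff):

	/* Sketch: if any PD/PT is swapped out, skip the update and
	 * wait for the next command submission to validate it again. */
	if (!amdgpu_vm_ready(adev, vm))
		return;

	r = amdgpu_vm_update_directories(adev, vm);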
In addition to that, move the code into amdgpu_vm.c. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 33 ++------------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 + 3 files changed, 35 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..9b1b6bdd4841 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -127,35 +127,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, return 0; } -static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo) -{ - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - return -ERESTARTSYS; - - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) - return -ERESTARTSYS; - - return 0; -} - -static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *list) -{ - struct ttm_validate_buffer *entry; - - list_for_each_entry(entry, list, head) { - struct amdgpu_bo *bo = - container_of(entry->bo, struct amdgpu_bo, tbo); - if (amdgpu_gem_vm_check(NULL, bo)) - return false; - } - - return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL); -} - void amdgpu_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv) { @@ -189,7 +160,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, if (bo_va && --bo_va->ref_count == 0) { amdgpu_vm_bo_rmv(adev, bo_va); - if (amdgpu_gem_vm_ready(adev, vm, &list)) { + if (amdgpu_vm_ready(adev, vm)) { struct dma_fence *fence = NULL; r = amdgpu_vm_clear_freed(adev, vm, &fence); @@ -513,7 +484,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, { int r = -ERESTARTSYS; - if (!amdgpu_gem_vm_ready(adev, vm, list)) + if (!amdgpu_vm_ready(adev, vm)) goto error; r = amdgpu_vm_update_directories(adev, vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index db63ff5c80f2..67c37b22f8ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -231,6 +231,38 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, adev->mman.bdev.glob); } +/** + * amdgpu_vm_check - helper for amdgpu_vm_ready + */ +static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo) +{ + /* if anything is swapped out don't swap it in here, + just abort and wait for the next CS */ + if (!amdgpu_bo_gpu_accessible(bo)) + return -ERESTARTSYS; + + if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) + return -ERESTARTSYS; + + return 0; +} + +/** + * amdgpu_vm_ready - check VM is ready for updates + * + * @adev: amdgpu device + * @vm: VM to check + * + * Check if all VM PDs/PTs are ready for updates + */ +bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + if (amdgpu_vm_check(NULL, vm->root.bo)) + return false; + + return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_vm_check, NULL); +} + /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ba6691b58ee7..9347d28c3c1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -225,6 +225,7 @@ void
amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); +bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); -- cgit v1.2.3 From 3d7d4d3a1b9f67c0caecf2b2aa8d7c347f074a33 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 23 Aug 2017 16:13:33 +0200 Subject: drm/amdgpu: rework moved handling in the VM v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of using the vm_state use a separate flag to note that the BO was moved. v2: reorder patches to avoid temporary lockless access Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13 ++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++ 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 67c37b22f8ef..1b36c62997b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1788,10 +1788,16 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, else flags = 0x0; - spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->base.vm_status)) + if (!clear && bo_va->base.moved) { + bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); - spin_unlock(&vm->status_lock); + + } else { + spin_lock(&vm->status_lock); + if (!list_empty(&bo_va->base.vm_status)) + list_splice_init(&bo_va->valids, &bo_va->invalids); + spin_unlock(&vm->status_lock); + } list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, @@ -2419,6 +2425,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_vm_bo_base *bo_base; list_for_each_entry(bo_base, &bo->va, bo_list) { + bo_base->moved = true; spin_lock(&bo_base->vm->status_lock); if (list_empty(&bo_base->vm_status)) list_add(&bo_base->vm_status, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9347d28c3c1e..1b478e62a948 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -105,6 +105,9 @@ struct amdgpu_vm_bo_base { /* protected by spinlock */ struct list_head vm_status; + + /* protected by the BO being reserved */ + bool moved; }; struct amdgpu_vm_pt { -- cgit v1.2.3 From cb7b6ec2f8b8759b6b5beb4d17ea6984867a3296 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 15 Aug 2017 17:08:12 +0200 Subject: drm/amdgpu: add bo_va cleared flag again v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We changed this to use an extra list a while back, but for the next series I need a separate flag again. 
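Roughly, the flag turns the list test in amdgpu_vm_bo_update() into a state comparison (a sketch of the logic; the actual hunk is in the diff below):

	/* Illustrative only: re-queue the still-valid mappings when the
	 * requested state (clear vs. fill) differs from what the page
	 * tables currently hold, then remember the state just written. */
	if (bo_va->cleared != clear)
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	...
	bo_va->cleared = clear;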
v2: reorder to avoid unlocked list access Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 35 +++++++++++------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 --- 3 files changed, 16 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index a288fa6d72c8..e613ba42f167 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -55,6 +55,9 @@ struct amdgpu_bo_va { /* mappings for this bo_va */ struct list_head invalids; struct list_head valids; + + /* If the mappings are cleared or filled */ + bool cleared; }; struct amdgpu_bo { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1b36c62997b3..1334bbb82634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1792,11 +1792,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); - } else { - spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->base.vm_status)) - list_splice_init(&bo_va->valids, &bo_va->invalids); - spin_unlock(&vm->status_lock); + } else if (bo_va->cleared != clear) { + list_splice_init(&bo_va->valids, &bo_va->invalids); } list_for_each_entry(mapping, &bo_va->invalids, list) { @@ -1807,25 +1804,22 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, return r; } - if (trace_amdgpu_vm_bo_mapping_enabled()) { - list_for_each_entry(mapping, &bo_va->valids, list) - trace_amdgpu_vm_bo_mapping(mapping); - - list_for_each_entry(mapping, &bo_va->invalids, list) - trace_amdgpu_vm_bo_mapping(mapping); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); } spin_lock(&vm->status_lock); - list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->base.vm_status); - if (clear) - list_add(&bo_va->base.vm_status, &vm->cleared); spin_unlock(&vm->status_lock); - if (vm->use_cpu_for_update) { - /* Flush HDP */ - mb(); - amdgpu_gart_flush_gpu_tlb(adev, 0); + list_splice_init(&bo_va->invalids, &bo_va->valids); + bo_va->cleared = clear; + + if (trace_amdgpu_vm_bo_mapping_enabled()) { + list_for_each_entry(mapping, &bo_va->valids, list) + trace_amdgpu_vm_bo_mapping(mapping); } return 0; @@ -2427,9 +2421,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, list_for_each_entry(bo_base, &bo->va, bo_list) { bo_base->moved = true; spin_lock(&bo_base->vm->status_lock); - if (list_empty(&bo_base->vm_status)) - list_add(&bo_base->vm_status, - &bo_base->vm->moved); + list_move(&bo_base->vm_status, &bo_base->vm->moved); spin_unlock(&bo_base->vm->status_lock); } } @@ -2516,7 +2508,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->moved); - INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); /* create scheduler entity for page table updates */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 1b478e62a948..ff093d4b5e11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -129,9 +129,6 @@ struct amdgpu_vm { /* BOs moved, but not yet updated in the PT */ struct list_head moved; - /* BOs cleared in 
the PT because of a move */ - struct list_head cleared; - /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; -- cgit v1.2.3 From 3f3333f8a0e90ac26f84ed7b0aa344efce695c08 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 3 Aug 2017 14:02:13 +0200 Subject: drm/amdgpu: track evicted page tables v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of validating all page tables when one was evicted, track which one needs a validation. v2: simplify amdgpu_vm_ready as well Signed-off-by: Christian König Reviewed-by: Alex Deucher (v1) Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 227 +++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 16 +- 5 files changed, 119 insertions(+), 141 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d6ddd5562c16..8bf178a912f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -636,9 +636,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, p->bytes_moved_vis); - fpriv->vm.last_eviction_counter = - atomic64_read(&p->adev->num_evictions); - if (p->bo_list) { struct amdgpu_bo *gds = p->bo_list->gds_obj; struct amdgpu_bo *gws = p->bo_list->gws_obj; @@ -835,7 +832,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (!bo) continue; - amdgpu_vm_bo_invalidate(adev, bo); + amdgpu_vm_bo_invalidate(adev, bo, false); } } @@ -860,7 +857,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, } if (p->job->vm) { - p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo); + p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo); r = amdgpu_bo_vm_update_pte(p); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ba012933e6aa..d02880640ee7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -160,7 +160,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, if (bo_va && --bo_va->ref_count == 0) { amdgpu_vm_bo_rmv(adev, bo_va); - if (amdgpu_vm_ready(adev, vm)) { + if (amdgpu_vm_ready(vm)) { struct dma_fence *fence = NULL; r = amdgpu_vm_clear_freed(adev, vm, &fence); @@ -481,10 +481,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct list_head *list, uint32_t operation) { - int r = -ERESTARTSYS; + int r; - if (!amdgpu_vm_ready(adev, vm)) - goto error; + if (!amdgpu_vm_ready(vm)) + return; r = amdgpu_vm_update_directories(adev, vm); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9e495da0bb03..52d0109c0d9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -929,7 +929,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, return; abo = container_of(bo, struct amdgpu_bo, tbo); - amdgpu_vm_bo_invalidate(adev, abo); + amdgpu_vm_bo_invalidate(adev, abo, evict); amdgpu_bo_kunmap(abo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1334bbb82634..6ff3c1bf035e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -140,7 +140,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry) { - entry->robj = vm->root.bo; + entry->robj = vm->root.base.bo; entry->priority = 0; entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; @@ -148,61 +148,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } -/** - * amdgpu_vm_validate_layer - validate a single page table level - * - * @parent: parent page table level - * @validate: callback to do the validation - * @param: parameter for the validation callback - * - * Validate the page table BOs on command submission if neccessary. - */ -static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, - int (*validate)(void *, struct amdgpu_bo *), - void *param, bool use_cpu_for_update, - struct ttm_bo_global *glob) -{ - unsigned i; - int r; - - if (use_cpu_for_update) { - r = amdgpu_bo_kmap(parent->bo, NULL); - if (r) - return r; - } - - if (!parent->entries) - return 0; - - for (i = 0; i <= parent->last_entry_used; ++i) { - struct amdgpu_vm_pt *entry = &parent->entries[i]; - - if (!entry->bo) - continue; - - r = validate(param, entry->bo); - if (r) - return r; - - spin_lock(&glob->lru_lock); - ttm_bo_move_to_lru_tail(&entry->bo->tbo); - if (entry->bo->shadow) - ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo); - spin_unlock(&glob->lru_lock); - - /* - * Recurse into the sub directory. This is harmless because we - * have only a maximum of 5 layers. - */ - r = amdgpu_vm_validate_level(entry, validate, param, - use_cpu_for_update, glob); - if (r) - return r; - } - - return r; -} - /** * amdgpu_vm_validate_pt_bos - validate the page table BOs * @@ -217,32 +162,43 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*validate)(void *p, struct amdgpu_bo *bo), void *param) { - uint64_t num_evictions; + struct ttm_bo_global *glob = adev->mman.bdev.glob; + int r; - /* We only need to validate the page tables - * if they aren't already valid. 
- */ - num_evictions = atomic64_read(&adev->num_evictions); - if (num_evictions == vm->last_eviction_counter) - return 0; + spin_lock(&vm->status_lock); + while (!list_empty(&vm->evicted)) { + struct amdgpu_vm_bo_base *bo_base; + struct amdgpu_bo *bo; - return amdgpu_vm_validate_level(&vm->root, validate, param, - vm->use_cpu_for_update, - adev->mman.bdev.glob); -} + bo_base = list_first_entry(&vm->evicted, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); -/** - * amdgpu_vm_check - helper for amdgpu_vm_ready - */ -static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo) -{ - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - return -ERESTARTSYS; + bo = bo_base->bo; + BUG_ON(!bo); + if (bo->parent) { + r = validate(param, bo); + if (r) + return r; - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) - return -ERESTARTSYS; + spin_lock(&glob->lru_lock); + ttm_bo_move_to_lru_tail(&bo->tbo); + if (bo->shadow) + ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + spin_unlock(&glob->lru_lock); + } + + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(bo, NULL); + if (r) + return r; + } + + spin_lock(&vm->status_lock); + list_del_init(&bo_base->vm_status); + } + spin_unlock(&vm->status_lock); return 0; } @@ -250,17 +206,19 @@ static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo) /** * amdgpu_vm_ready - check VM is ready for updates * - * @adev: amdgpu device * @vm: VM to check * * Check if all VM PDs/PTs are ready for updates */ -bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm) +bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - if (amdgpu_vm_check(NULL, vm->root.bo)) - return false; + bool ready; + + spin_lock(&vm->status_lock); + ready = list_empty(&vm->evicted); + spin_unlock(&vm->status_lock); - return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_vm_check, NULL); + return ready; } /** @@ -326,11 +284,11 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { - struct reservation_object *resv = vm->root.bo->tbo.resv; + struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; struct amdgpu_bo *pt; - if (!entry->bo) { + if (!entry->base.bo) { r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, level), AMDGPU_GPU_PAGE_SIZE, true, @@ -351,9 +309,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. 
*/ - pt->parent = amdgpu_bo_ref(vm->root.bo); + pt->parent = amdgpu_bo_ref(vm->root.base.bo); - entry->bo = pt; + entry->base.vm = vm; + entry->base.bo = pt; + list_add_tail(&entry->base.bo_list, &pt->va); + INIT_LIST_HEAD(&entry->base.vm_status); entry->addr = 0; } @@ -1020,7 +981,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner); + amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); @@ -1059,10 +1020,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; - shadow = parent->bo->shadow; + shadow = parent->base.bo->shadow; if (vm->use_cpu_for_update) { - pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo); r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; @@ -1078,7 +1039,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, /* assume the worst case */ ndw += parent->last_entry_used * 6; - pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pd_addr = amdgpu_bo_gpu_offset(parent->base.bo); if (shadow) { shadow_addr = amdgpu_bo_gpu_offset(shadow); @@ -1098,7 +1059,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, /* walk over the address space and update the directory */ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { - struct amdgpu_bo *bo = parent->entries[pt_idx].bo; + struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo; uint64_t pde, pt; if (bo == NULL) @@ -1141,7 +1102,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, } if (count) { - if (vm->root.bo->shadow) + if (vm->root.base.bo->shadow) params.func(¶ms, last_shadow, last_pt, count, incr, AMDGPU_PTE_VALID); @@ -1154,7 +1115,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, amdgpu_job_free(job); } else { amdgpu_ring_pad_ib(ring, params.ib); - amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, + amdgpu_sync_resv(adev, &job->sync, + parent->base.bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); if (shadow) amdgpu_sync_resv(adev, &job->sync, @@ -1167,7 +1129,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_bo_fence(parent->bo, fence, true); + amdgpu_bo_fence(parent->base.bo, fence, true); dma_fence_put(vm->last_dir_update); vm->last_dir_update = dma_fence_get(fence); dma_fence_put(fence); @@ -1180,7 +1142,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - if (!entry->bo) + if (!entry->base.bo) continue; r = amdgpu_vm_update_level(adev, vm, entry, level + 1); @@ -1213,7 +1175,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - if (!entry->bo) + if (!entry->base.bo) continue; entry->addr = ~0ULL; @@ -1268,7 +1230,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, *entry = &p->vm->root; while ((*entry)->entries) { idx = addr >> (p->adev->vm_manager.block_size * level--); - idx %= amdgpu_bo_size((*entry)->bo) / 8; + idx %= amdgpu_bo_size((*entry)->base.bo) / 8; *parent = *entry; *entry = &(*entry)->entries[idx]; } @@ -1304,7 +1266,7 @@ static void 
amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, p->src || !(flags & AMDGPU_PTE_VALID)) { - dst = amdgpu_bo_gpu_offset(entry->bo); + dst = amdgpu_bo_gpu_offset(entry->base.bo); dst = amdgpu_gart_get_vm_pde(p->adev, dst); flags = AMDGPU_PTE_VALID; } else { @@ -1330,18 +1292,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, tmp = p->pages_addr; p->pages_addr = NULL; - pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); p->pages_addr = tmp; } else { - if (parent->bo->shadow) { - pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); + if (parent->base.bo->shadow) { + pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); } - pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pd_addr = amdgpu_bo_gpu_offset(parent->base.bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); } @@ -1392,7 +1354,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if (entry->addr & AMDGPU_PDE_PTE) continue; - pt = entry->bo; + pt = entry->base.bo; if (use_cpu_update) { pe_start = (unsigned long)amdgpu_bo_kptr(pt); } else { @@ -1612,12 +1574,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv, + r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv, owner); if (r) goto error_free; - r = reservation_object_reserve_shared(vm->root.bo->tbo.resv); + r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); if (r) goto error_free; @@ -1632,7 +1594,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_bo_fence(vm->root.bo, f, true); + amdgpu_bo_fence(vm->root.base.bo, f, true); dma_fence_put(*fence); *fence = f; return 0; @@ -1927,7 +1889,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, */ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct reservation_object *resv = vm->root.bo->tbo.resv; + struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct dma_fence *excl, **shared; unsigned i, shared_count; int r; @@ -2414,12 +2376,25 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, * Mark @bo as invalid. 
*/ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, - struct amdgpu_bo *bo) + struct amdgpu_bo *bo, bool evicted) { struct amdgpu_vm_bo_base *bo_base; list_for_each_entry(bo_base, &bo->va, bo_list) { + struct amdgpu_vm *vm = bo_base->vm; + bo_base->moved = true; + if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + spin_lock(&bo_base->vm->status_lock); + list_move(&bo_base->vm_status, &vm->evicted); + spin_unlock(&bo_base->vm->status_lock); + continue; + } + + /* Don't add page tables to the moved state */ + if (bo->tbo.type == ttm_bo_type_kernel) + continue; + spin_lock(&bo_base->vm->status_lock); list_move(&bo_base->vm_status, &bo_base->vm->moved); spin_unlock(&bo_base->vm->status_lock); @@ -2507,6 +2482,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); + INIT_LIST_HEAD(&vm->evicted); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->freed); @@ -2551,30 +2527,31 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, NULL, init_pde_value, &vm->root.bo); + NULL, NULL, init_pde_value, &vm->root.base.bo); if (r) goto error_free_sched_entity; - r = amdgpu_bo_reserve(vm->root.bo, false); - if (r) - goto error_free_root; - - vm->last_eviction_counter = atomic64_read(&adev->num_evictions); + vm->root.base.vm = vm; + list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va); + INIT_LIST_HEAD(&vm->root.base.vm_status); if (vm->use_cpu_for_update) { - r = amdgpu_bo_kmap(vm->root.bo, NULL); + r = amdgpu_bo_reserve(vm->root.base.bo, false); if (r) goto error_free_root; - } - amdgpu_bo_unreserve(vm->root.bo); + r = amdgpu_bo_kmap(vm->root.base.bo, NULL); + if (r) + goto error_free_root; + amdgpu_bo_unreserve(vm->root.base.bo); + } return 0; error_free_root: - amdgpu_bo_unref(&vm->root.bo->shadow); - amdgpu_bo_unref(&vm->root.bo); - vm->root.bo = NULL; + amdgpu_bo_unref(&vm->root.base.bo->shadow); + amdgpu_bo_unref(&vm->root.base.bo); + vm->root.base.bo = NULL; error_free_sched_entity: amd_sched_entity_fini(&ring->sched, &vm->entity); @@ -2593,9 +2570,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level) { unsigned i; - if (level->bo) { - amdgpu_bo_unref(&level->bo->shadow); - amdgpu_bo_unref(&level->bo); + if (level->base.bo) { + list_del(&level->base.bo_list); + list_del(&level->base.vm_status); + amdgpu_bo_unref(&level->base.bo->shadow); + amdgpu_bo_unref(&level->base.bo); } if (level->entries) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ff093d4b5e11..4e465e817fe8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -111,12 +111,12 @@ struct amdgpu_vm_bo_base { }; struct amdgpu_vm_pt { - struct amdgpu_bo *bo; - uint64_t addr; + struct amdgpu_vm_bo_base base; + uint64_t addr; /* array of page tables, one for each directory entry */ - struct amdgpu_vm_pt *entries; - unsigned last_entry_used; + struct amdgpu_vm_pt *entries; + unsigned last_entry_used; }; struct amdgpu_vm { @@ -126,6 +126,9 @@ struct amdgpu_vm { /* protecting invalidated */ spinlock_t status_lock; + /* BOs who needs a validation */ + struct list_head evicted; + /* BOs moved, but not yet updated in the PT */ struct list_head moved; @@ -135,7 +138,6 @@ struct amdgpu_vm { /* contains the page directory */ struct amdgpu_vm_pt root; struct dma_fence 
*last_dir_update; - uint64_t last_eviction_counter; /* protecting freed */ spinlock_t freed_lock; @@ -225,7 +227,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); -bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm); +bool amdgpu_vm_ready(struct amdgpu_vm *vm); int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); @@ -250,7 +252,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, - struct amdgpu_bo *bo); + struct amdgpu_bo *bo, bool evicted); struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo *bo); struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, -- cgit v1.2.3 From ea09729c930223edf492d0ca647c27e7eb0ccb12 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 9 Aug 2017 14:15:46 +0200 Subject: drm/amdgpu: rework page directory filling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Keep track of relocated PDs/PTs instead of walking and checking all PDs. v2: fix root PD handling Signed-off-by: Christian König Reviewed-by: Alex Deucher (v1) Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 89 +++++++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ++ 2 files changed, 63 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6ff3c1bf035e..faa08d5728da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -196,7 +196,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } spin_lock(&vm->status_lock); - list_del_init(&bo_base->vm_status); + list_move(&bo_base->vm_status, &vm->relocated); } spin_unlock(&vm->status_lock); @@ -314,8 +314,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, entry->base.vm = vm; entry->base.bo = pt; list_add_tail(&entry->base.bo_list, &pt->va); - INIT_LIST_HEAD(&entry->base.vm_status); - entry->addr = 0; + spin_lock(&vm->status_lock); + list_add(&entry->base.vm_status, &vm->relocated); + spin_unlock(&vm->status_lock); + entry->addr = ~0ULL; } if (level < adev->vm_manager.num_level) { @@ -1000,18 +1002,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ static int amdgpu_vm_update_level(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_vm_pt *parent, - unsigned level) + struct amdgpu_vm_pt *parent) { struct amdgpu_bo *shadow; struct amdgpu_ring *ring = NULL; uint64_t pd_addr, shadow_addr = 0; - uint32_t incr = amdgpu_vm_bo_size(adev, level + 1); uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; unsigned count = 0, pt_idx, ndw = 0; struct amdgpu_job *job; struct amdgpu_pte_update_params params; struct dma_fence *fence = NULL; + uint32_t incr; int r; @@ -1059,12 +1060,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, /* walk over the address space and update the directory */ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { - struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo; + struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; + struct amdgpu_bo *bo
= entry->base.bo; uint64_t pde, pt; if (bo == NULL) continue; + spin_lock(&vm->status_lock); + list_del_init(&entry->base.vm_status); + spin_unlock(&vm->status_lock); + pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); /* Don't update huge pages here */ @@ -1075,6 +1081,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID; pde = pd_addr + pt_idx * 8; + incr = amdgpu_bo_size(bo); if (((last_pde + 8 * count) != pde) || ((last_pt + incr * count) != pt) || (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { @@ -1135,20 +1142,6 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, dma_fence_put(fence); } } - /* - * Recurse into the subdirectories. This recursion is harmless because - * we only have a maximum of 5 layers. - */ - for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { - struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - - if (!entry->base.bo) - continue; - - r = amdgpu_vm_update_level(adev, vm, entry, level + 1); - if (r) - return r; - } return 0; @@ -1164,7 +1157,8 @@ error_free: * * Mark all PD level as invalid after an error. */ -static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) +static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm, + struct amdgpu_vm_pt *parent) { unsigned pt_idx; @@ -1179,7 +1173,10 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) continue; entry->addr = ~0ULL; - amdgpu_vm_invalidate_level(entry); + spin_lock(&vm->status_lock); + list_move(&entry->base.vm_status, &vm->relocated); + spin_unlock(&vm->status_lock); + amdgpu_vm_invalidate_level(vm, entry); } } @@ -1197,9 +1194,38 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, { int r; - r = amdgpu_vm_update_level(adev, vm, &vm->root, 0); - if (r) - amdgpu_vm_invalidate_level(&vm->root); + spin_lock(&vm->status_lock); + while (!list_empty(&vm->relocated)) { + struct amdgpu_vm_bo_base *bo_base; + struct amdgpu_bo *bo; + + bo_base = list_first_entry(&vm->relocated, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); + + bo = bo_base->bo->parent; + if (bo) { + struct amdgpu_vm_bo_base *parent; + struct amdgpu_vm_pt *pt; + + parent = list_first_entry(&bo->va, + struct amdgpu_vm_bo_base, + bo_list); + pt = container_of(parent, struct amdgpu_vm_pt, base); + + r = amdgpu_vm_update_level(adev, vm, pt); + if (r) { + amdgpu_vm_invalidate_level(vm, &vm->root); + return r; + } + spin_lock(&vm->status_lock); + } else { + spin_lock(&vm->status_lock); + list_del_init(&bo_base->vm_status); + } + } + spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { /* Flush HDP */ @@ -1601,7 +1627,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, error_free: amdgpu_job_free(job); - amdgpu_vm_invalidate_level(&vm->root); + amdgpu_vm_invalidate_level(vm, &vm->root); return r; } @@ -2391,9 +2417,13 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, continue; } - /* Don't add page tables to the moved state */ - if (bo->tbo.type == ttm_bo_type_kernel) + if (bo->tbo.type == ttm_bo_type_kernel) { + spin_lock(&bo_base->vm->status_lock); + if (list_empty(&bo_base->vm_status)) + list_add(&bo_base->vm_status, &vm->relocated); + spin_unlock(&bo_base->vm->status_lock); continue; + } spin_lock(&bo_base->vm->status_lock); list_move(&bo_base->vm_status, &bo_base->vm->moved); @@ -2483,6 +2513,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); 
INIT_LIST_HEAD(&vm->evicted); + INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->freed); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4e465e817fe8..c3753afe9853 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -129,6 +129,9 @@ struct amdgpu_vm { /* BOs who needs a validation */ struct list_head evicted; + /* PT BOs which relocated and their parent need an update */ + struct list_head relocated; + /* BOs moved, but not yet updated in the PT */ struct list_head moved; -- cgit v1.2.3 From 73fb16e7ebee12953de32a7a2552e0cf2bf74ebf Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 16 Aug 2017 11:13:48 +0200 Subject: drm/amdgpu: add support for per VM BOs v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Per VM BOs are handled like VM PDs and PTs. They are always valid and don't need to be specified in the BO lists. v2: validate PDs/PTs first Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 79 ++++++++++++++++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 ++- 3 files changed, 60 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 8bf178a912f2..b57adc0723cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -822,7 +822,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync); + r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d3c48557555c..26eb7dce5fe5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -189,14 +189,18 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_unlock(&glob->lru_lock); } - if (vm->use_cpu_for_update) { + if (bo->tbo.type == ttm_bo_type_kernel && + vm->use_cpu_for_update) { r = amdgpu_bo_kmap(bo, NULL); if (r) return r; } spin_lock(&vm->status_lock); - list_move(&bo_base->vm_status, &vm->relocated); + if (bo->tbo.type != ttm_bo_type_kernel) + list_move(&bo_base->vm_status, &vm->moved); + else + list_move(&bo_base->vm_status, &vm->relocated); } spin_unlock(&vm->status_lock); @@ -1985,20 +1989,23 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, } /** - * amdgpu_vm_clear_moved - clear moved BOs in the PT + * amdgpu_vm_handle_moved - handle moved BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm + * @sync: sync object to add fences to * - * Make sure all moved BOs are cleared in the PT. + * Make sure all BOs which are moved are updated in the PTs. * Returns 0 for success. * - * PTs have to be reserved and mutex must be locked! + * PTs have to be reserved! 
*/ -int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync) +int amdgpu_vm_handle_moved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = NULL; + bool clear; int r = 0; spin_lock(&vm->status_lock); @@ -2007,7 +2014,10 @@ int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); - r = amdgpu_vm_bo_update(adev, bo_va, true); + /* Per VM BOs never need to bo cleared in the page tables */ + clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv; + + r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; @@ -2059,6 +2069,37 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, return bo_va; } + +/** + * amdgpu_vm_bo_insert_mapping - insert a new mapping + * + * @adev: amdgpu_device pointer + * @bo_va: bo_va to store the address + * @mapping: the mapping to insert + * + * Insert a new mapping into all structures. + */ +static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + struct amdgpu_bo_va_mapping *mapping) +{ + struct amdgpu_vm *vm = bo_va->base.vm; + struct amdgpu_bo *bo = bo_va->base.bo; + + list_add(&mapping->list, &bo_va->invalids); + amdgpu_vm_it_insert(mapping, &vm->va); + + if (mapping->flags & AMDGPU_PTE_PRT) + amdgpu_vm_prt_get(adev); + + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + spin_lock(&vm->status_lock); + list_move(&bo_va->base.vm_status, &vm->moved); + spin_unlock(&vm->status_lock); + } + trace_amdgpu_vm_bo_map(bo_va, mapping); +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -2110,18 +2151,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (!mapping) return -ENOMEM; - INIT_LIST_HEAD(&mapping->list); mapping->start = saddr; mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; - list_add(&mapping->list, &bo_va->invalids); - amdgpu_vm_it_insert(mapping, &vm->va); - - if (flags & AMDGPU_PTE_PRT) - amdgpu_vm_prt_get(adev); - trace_amdgpu_vm_bo_map(bo_va, mapping); + amdgpu_vm_bo_insert_map(adev, bo_va, mapping); return 0; } @@ -2148,7 +2183,6 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo = bo_va->base.bo; - struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; int r; @@ -2182,12 +2216,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; - list_add(&mapping->list, &bo_va->invalids); - amdgpu_vm_it_insert(mapping, &vm->va); - - if (flags & AMDGPU_PTE_PRT) - amdgpu_vm_prt_get(adev); - trace_amdgpu_vm_bo_map(bo_va, mapping); + amdgpu_vm_bo_insert_map(adev, bo_va, mapping); return 0; } @@ -2402,7 +2431,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, bo_base->moved = true; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { spin_lock(&bo_base->vm->status_lock); - list_move(&bo_base->vm_status, &vm->evicted); + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&bo_base->vm_status, &vm->evicted); + else + list_move_tail(&bo_base->vm_status, + &vm->evicted); spin_unlock(&bo_base->vm->status_lock); continue; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index c3753afe9853..90b7741d024b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -249,8 +249,9 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, int 
amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); -int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync); +int amdgpu_vm_handle_moved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); -- cgit v1.2.3 From f808c13fd3738948e10196496959871130612b61 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Fri, 8 Sep 2017 16:15:08 -0700 Subject: lib/interval_tree: fast overlap detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow interval trees to quickly check for overlaps to avoid unnecessary tree lookups in interval_tree_iter_first(). As of this patch, all interval tree flavors will require using a 'rb_root_cached' such that we can have the leftmost node easily available. While most users will make use of this feature, those with special functions (in addition to the generic insert, delete, search calls) will avoid using the cached option as they can do funky things with insertions -- for example, vma_interval_tree_insert_after(). [jglisse@redhat.com: fix deadlock from typo vm_lock_anon_vma()] Link: http://lkml.kernel.org/r/20170808225719.20723-1-jglisse@redhat.com Link: http://lkml.kernel.org/r/20170719014603.19029-12-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Signed-off-by: Jérôme Glisse Acked-by: Christian König Acked-by: Peter Zijlstra (Intel) Acked-by: Doug Ledford Acked-by: Michael S. Tsirkin Cc: David Airlie Cc: Jason Wang Cc: Christian Benvenuti Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- drivers/gpu/drm/drm_mm.c | 19 +++++---- drivers/gpu/drm/drm_vma_manager.c | 2 +- drivers/gpu/drm/i915/i915_gem_userptr.c | 6 +-- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_mn.c | 8 ++-- drivers/gpu/drm/radeon/radeon_vm.c | 7 ++-- drivers/infiniband/core/umem_rbtree.c | 4 +- drivers/infiniband/core/uverbs_cmd.c | 2 +- drivers/infiniband/hw/hfi1/mmu_rb.c | 10 ++--- drivers/infiniband/hw/usnic/usnic_uiom.c | 6 +-- drivers/infiniband/hw/usnic/usnic_uiom.h | 2 +- .../infiniband/hw/usnic/usnic_uiom_interval_tree.c | 15 +++---- .../infiniband/hw/usnic/usnic_uiom_interval_tree.h | 12 +++--- drivers/vhost/vhost.c | 2 +- drivers/vhost/vhost.h | 2 +- fs/hugetlbfs/inode.c | 6 +-- fs/inode.c | 2 +- include/drm/drm_mm.h | 2 +- include/linux/fs.h | 4 +- include/linux/interval_tree.h | 8 ++-- include/linux/interval_tree_generic.h | 46 +++++++++++++++++----- include/linux/mm.h | 17 ++++---- include/linux/rmap.h | 4 +- include/rdma/ib_umem_odp.h | 11 ++++-- include/rdma/ib_verbs.h | 2 +- lib/interval_tree_test.c | 4 +- mm/interval_tree.c | 10 ++--- mm/memory.c | 4 +- mm/mmap.c | 10 ++--- mm/rmap.c | 4 +- 33 files changed, 145 insertions(+), 105 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index e1cde6b80027..3b0f2ec6eec7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -51,7 +51,7 @@ struct amdgpu_mn { /* objects protected by lock */ struct mutex lock; - struct rb_root objects; + struct rb_root_cached objects; }; struct amdgpu_mn_node { @@ -76,8 +76,8 @@ static void amdgpu_mn_destroy(struct
work_struct *work) mutex_lock(&adev->mn_lock); mutex_lock(&rmn->lock); hash_del(&rmn->node); - rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, - it.rb) { + rbtree_postorder_for_each_entry_safe(node, next_node, + &rmn->objects.rb_root, it.rb) { list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { bo->mn = NULL; list_del_init(&bo->mn_list); @@ -221,7 +221,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; mutex_init(&rmn->lock); - rmn->objects = RB_ROOT; + rmn->objects = RB_ROOT_CACHED; r = __mmu_notifier_register(&rmn->mn, mm); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b1343e5541d..b9a5a77eedaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2475,7 +2475,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u64 flags; uint64_t init_pde_value = 0; - vm->va = RB_ROOT; + vm->va = RB_ROOT_CACHED; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; @@ -2596,10 +2596,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amd_sched_entity_fini(vm->entity.sched, &vm->entity); - if (!RB_EMPTY_ROOT(&vm->va)) { + if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); } - rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) { + rbtree_postorder_for_each_entry_safe(mapping, tmp, + &vm->va.rb_root, rb) { list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); kfree(mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ba6691b58ee7..6716355403ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -118,7 +118,7 @@ struct amdgpu_vm_pt { struct amdgpu_vm { /* tree of virtual addresses mapped */ - struct rb_root va; + struct rb_root_cached va; /* protecting invalidated */ spinlock_t status_lock; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index f794089d30ac..61a1c8ea74bc 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -169,7 +169,7 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) { - return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree, + return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, start, last) ?: (struct drm_mm_node *)&mm->head_node; } EXPORT_SYMBOL(__drm_mm_interval_first); @@ -180,6 +180,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, struct drm_mm *mm = hole_node->mm; struct rb_node **link, *rb; struct drm_mm_node *parent; + bool leftmost = true; node->__subtree_last = LAST(node); @@ -196,9 +197,10 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, rb = &hole_node->rb; link = &hole_node->rb.rb_right; + leftmost = false; } else { rb = NULL; - link = &mm->interval_tree.rb_node; + link = &mm->interval_tree.rb_root.rb_node; } while (*link) { @@ -208,14 +210,15 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, parent->__subtree_last = node->__subtree_last; if (node->start < parent->start) link = &parent->rb.rb_left; - else + else { link = &parent->rb.rb_right; + leftmost = true; + } } rb_link_node(&node->rb, rb, link); - 
rb_insert_augmented(&node->rb, - &mm->interval_tree, - &drm_mm_interval_tree_augment); + rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, + &drm_mm_interval_tree_augment); } #define RB_INSERT(root, member, expr) do { \ @@ -577,7 +580,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) *new = *old; list_replace(&old->node_list, &new->node_list); - rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree); + rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root); if (drm_mm_hole_follows(old)) { list_replace(&old->hole_stack, &new->hole_stack); @@ -863,7 +866,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) mm->color_adjust = NULL; INIT_LIST_HEAD(&mm->hole_stack); - mm->interval_tree = RB_ROOT; + mm->interval_tree = RB_ROOT_CACHED; mm->holes_size = RB_ROOT; mm->holes_addr = RB_ROOT; diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index d9100b565198..28f1226576f8 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -147,7 +147,7 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m struct rb_node *iter; unsigned long offset; - iter = mgr->vm_addr_space_mm.interval_tree.rb_node; + iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node; best = NULL; while (likely(iter)) { diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index f152a38d7079..23fd18bd1b56 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -49,7 +49,7 @@ struct i915_mmu_notifier { spinlock_t lock; struct hlist_node node; struct mmu_notifier mn; - struct rb_root objects; + struct rb_root_cached objects; struct workqueue_struct *wq; }; @@ -123,7 +123,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct interval_tree_node *it; LIST_HEAD(cancelled); - if (RB_EMPTY_ROOT(&mn->objects)) + if (RB_EMPTY_ROOT(&mn->objects.rb_root)) return; /* interval ranges are inclusive, but invalidate range is exclusive */ @@ -172,7 +172,7 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; - mn->objects = RB_ROOT; + mn->objects = RB_ROOT_CACHED; mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0); if (mn->wq == NULL) { kfree(mn); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ec63bc5e9de7..8cbaeec090c9 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -924,7 +924,7 @@ struct radeon_vm_id { struct radeon_vm { struct mutex mutex; - struct rb_root va; + struct rb_root_cached va; /* protecting invalidated and freed */ spinlock_t status_lock; diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 896f2cf51e4e..1d62288b7ee3 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -50,7 +50,7 @@ struct radeon_mn { /* objects protected by lock */ struct mutex lock; - struct rb_root objects; + struct rb_root_cached objects; }; struct radeon_mn_node { @@ -75,8 +75,8 @@ static void radeon_mn_destroy(struct work_struct *work) mutex_lock(&rdev->mn_lock); mutex_lock(&rmn->lock); hash_del(&rmn->node); - rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, - it.rb) { + rbtree_postorder_for_each_entry_safe(node, next_node, + &rmn->objects.rb_root, it.rb) { interval_tree_remove(&node->it, &rmn->objects); 
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { @@ -205,7 +205,7 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev) rmn->mm = mm; rmn->mn.ops = &radeon_mn_ops; mutex_init(&rmn->lock); - rmn->objects = RB_ROOT; + rmn->objects = RB_ROOT_CACHED; r = __mmu_notifier_register(&rmn->mn, mm); if (r) diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 5e82b408d522..e5c0e635e371 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -1185,7 +1185,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) vm->ids[i].last_id_use = NULL; } mutex_init(&vm->mutex); - vm->va = RB_ROOT; + vm->va = RB_ROOT_CACHED; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->freed); @@ -1232,10 +1232,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) struct radeon_bo_va *bo_va, *tmp; int i, r; - if (!RB_EMPTY_ROOT(&vm->va)) { + if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(rdev->dev, "still active bo inside vm\n"); } - rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) { + rbtree_postorder_for_each_entry_safe(bo_va, tmp, + &vm->va.rb_root, it.rb) { interval_tree_remove(&bo_va->it, &vm->va); r = radeon_bo_reserve(bo_va->bo, false); if (!r) { diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c index d176597b4d78..fc801920e341 100644 --- a/drivers/infiniband/core/umem_rbtree.c +++ b/drivers/infiniband/core/umem_rbtree.c @@ -72,7 +72,7 @@ INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last, /* @last is not a part of the interval. See comment for function * node_last. */ -int rbt_ib_umem_for_each_in_range(struct rb_root *root, +int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root, u64 start, u64 last, umem_call_back cb, void *cookie) @@ -95,7 +95,7 @@ int rbt_ib_umem_for_each_in_range(struct rb_root *root, } EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range); -struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root, +struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length) { struct umem_odp_node *node; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e0cb99860934..4ab30d832ac5 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -118,7 +118,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, ucontext->closing = 0; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - ucontext->umem_tree = RB_ROOT; + ucontext->umem_tree = RB_ROOT_CACHED; init_rwsem(&ucontext->umem_rwsem); ucontext->odp_mrs_count = 0; INIT_LIST_HEAD(&ucontext->no_private_counters); diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 2f0d285dc278..175002c046ed 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -54,7 +54,7 @@ struct mmu_rb_handler { struct mmu_notifier mn; - struct rb_root root; + struct rb_root_cached root; void *ops_arg; spinlock_t lock; /* protect the RB tree */ struct mmu_rb_ops *ops; @@ -108,7 +108,7 @@ int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm, if (!handlr) return -ENOMEM; - handlr->root = RB_ROOT; + handlr->root = RB_ROOT_CACHED; handlr->ops = ops; handlr->ops_arg = ops_arg; INIT_HLIST_NODE(&handlr->mn.hlist); @@ -149,9 +149,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) INIT_LIST_HEAD(&del_list); spin_lock_irqsave(&handler->lock, flags); - 
while ((node = rb_first(&handler->root))) { + while ((node = rb_first_cached(&handler->root))) { rbnode = rb_entry(node, struct mmu_rb_node, node); - rb_erase(node, &handler->root); + rb_erase_cached(node, &handler->root); /* move from LRU list to delete list */ list_move(&rbnode->list, &del_list); } @@ -300,7 +300,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, { struct mmu_rb_handler *handler = container_of(mn, struct mmu_rb_handler, mn); - struct rb_root *root = &handler->root; + struct rb_root_cached *root = &handler->root; struct mmu_rb_node *node, *ptr = NULL; unsigned long flags; bool added = false; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index c49db7c33979..4381c0a9a873 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -227,7 +227,7 @@ static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, vpn_last = vpn_start + npages - 1; spin_lock(&pd->lock); - usnic_uiom_remove_interval(&pd->rb_root, vpn_start, + usnic_uiom_remove_interval(&pd->root, vpn_start, vpn_last, &rm_intervals); usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); @@ -379,7 +379,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last, (writable) ? IOMMU_WRITE : 0, IOMMU_WRITE, - &pd->rb_root, + &pd->root, &sorted_diff_intervals); if (err) { usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n", @@ -395,7 +395,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, } - err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last, + err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last, (writable) ? IOMMU_WRITE : 0); if (err) { usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n", diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 45ca7c1613a7..431efe4143f4 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -55,7 +55,7 @@ struct usnic_uiom_dev { struct usnic_uiom_pd { struct iommu_domain *domain; spinlock_t lock; - struct rb_root rb_root; + struct rb_root_cached root; struct list_head devs; int dev_cnt; }; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index 42b4b4c4e452..d399523206c7 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c @@ -100,9 +100,9 @@ static int interval_cmp(void *priv, struct list_head *a, struct list_head *b) } static void -find_intervals_intersection_sorted(struct rb_root *root, unsigned long start, - unsigned long last, - struct list_head *list) +find_intervals_intersection_sorted(struct rb_root_cached *root, + unsigned long start, unsigned long last, + struct list_head *list) { struct usnic_uiom_interval_node *node; @@ -118,7 +118,7 @@ find_intervals_intersection_sorted(struct rb_root *root, unsigned long start, int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, int flags, int flag_mask, - struct rb_root *root, + struct rb_root_cached *root, struct list_head *diff_set) { struct usnic_uiom_interval_node *interval, *tmp; @@ -175,7 +175,7 @@ void usnic_uiom_put_interval_set(struct list_head *intervals) kfree(interval); } -int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start, +int usnic_uiom_insert_interval(struct rb_root_cached *root, 
unsigned long start, unsigned long last, int flags) { struct usnic_uiom_interval_node *interval, *tmp; @@ -246,8 +246,9 @@ err_out: return err; } -void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start, - unsigned long last, struct list_head *removed) +void usnic_uiom_remove_interval(struct rb_root_cached *root, + unsigned long start, unsigned long last, + struct list_head *removed) { struct usnic_uiom_interval_node *interval; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h index c0b0b876ab90..1d7fc3226bca 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h @@ -48,12 +48,12 @@ struct usnic_uiom_interval_node { extern void usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node, - struct rb_root *root); + struct rb_root_cached *root); extern void usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node, - struct rb_root *root); + struct rb_root_cached *root); extern struct usnic_uiom_interval_node * -usnic_uiom_interval_tree_iter_first(struct rb_root *root, +usnic_uiom_interval_tree_iter_first(struct rb_root_cached *root, unsigned long start, unsigned long last); extern struct usnic_uiom_interval_node * @@ -63,7 +63,7 @@ usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node, * Inserts {start...last} into {root}. If there are overlaps, * nodes will be broken up and merged */ -int usnic_uiom_insert_interval(struct rb_root *root, +int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, int flags); /* @@ -71,7 +71,7 @@ int usnic_uiom_insert_interval(struct rb_root *root, * 'removed.' The caller is responsibile for freeing memory of nodes in * 'removed.' 
*/ -void usnic_uiom_remove_interval(struct rb_root *root, +void usnic_uiom_remove_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, struct list_head *removed); /* @@ -81,7 +81,7 @@ void usnic_uiom_remove_interval(struct rb_root *root, int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, int flags, int flag_mask, - struct rb_root *root, + struct rb_root_cached *root, struct list_head *diff_set); /* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */ void usnic_uiom_put_interval_set(struct list_head *intervals); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9cb3f722dce1..d6dbb28245e6 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1271,7 +1271,7 @@ static struct vhost_umem *vhost_umem_alloc(void) if (!umem) return NULL; - umem->umem_tree = RB_ROOT; + umem->umem_tree = RB_ROOT_CACHED; umem->numem = 0; INIT_LIST_HEAD(&umem->umem_list); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index bb7c29b8b9fc..d59a9cc65f9d 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -71,7 +71,7 @@ struct vhost_umem_node { }; struct vhost_umem { - struct rb_root umem_tree; + struct rb_root_cached umem_tree; struct list_head umem_list; int numem; }; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 8c6f4b8f910f..59073e9f01a4 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -334,7 +334,7 @@ static void remove_huge_page(struct page *page) } static void -hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end) +hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end) { struct vm_area_struct *vma; @@ -498,7 +498,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) i_size_write(inode, offset); i_mmap_lock_write(mapping); - if (!RB_EMPTY_ROOT(&mapping->i_mmap)) + if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); i_mmap_unlock_write(mapping); remove_inode_hugepages(inode, offset, LLONG_MAX); @@ -523,7 +523,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) inode_lock(inode); i_mmap_lock_write(mapping); - if (!RB_EMPTY_ROOT(&mapping->i_mmap)) + if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT, hole_end >> PAGE_SHIFT); diff --git a/fs/inode.c b/fs/inode.c index 6a1626e0edaf..210054157a49 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -353,7 +353,7 @@ void address_space_init_once(struct address_space *mapping) init_rwsem(&mapping->i_mmap_rwsem); INIT_LIST_HEAD(&mapping->private_list); spin_lock_init(&mapping->private_lock); - mapping->i_mmap = RB_ROOT; + mapping->i_mmap = RB_ROOT_CACHED; } EXPORT_SYMBOL(address_space_init_once); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 49b292e98fec..8d10fc97801c 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -172,7 +172,7 @@ struct drm_mm { * according to the (increasing) start address of the memory node. */ struct drm_mm_node head_node; /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. 
*/ - struct rb_root interval_tree; + struct rb_root_cached interval_tree; struct rb_root holes_size; struct rb_root holes_addr; diff --git a/include/linux/fs.h b/include/linux/fs.h index 509434aaf5a4..6111976848ff 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -392,7 +392,7 @@ struct address_space { struct radix_tree_root page_tree; /* radix tree of all pages */ spinlock_t tree_lock; /* and lock protecting it */ atomic_t i_mmap_writable;/* count VM_SHARED mappings */ - struct rb_root i_mmap; /* tree of private and shared mappings */ + struct rb_root_cached i_mmap; /* tree of private and shared mappings */ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ /* Protected by tree_lock together with the radix tree */ unsigned long nrpages; /* number of total pages */ @@ -487,7 +487,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping) */ static inline int mapping_mapped(struct address_space *mapping) { - return !RB_EMPTY_ROOT(&mapping->i_mmap); + return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); } /* diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h index 724556aa3c95..202ee1283f4b 100644 --- a/include/linux/interval_tree.h +++ b/include/linux/interval_tree.h @@ -11,13 +11,15 @@ struct interval_tree_node { }; extern void -interval_tree_insert(struct interval_tree_node *node, struct rb_root *root); +interval_tree_insert(struct interval_tree_node *node, + struct rb_root_cached *root); extern void -interval_tree_remove(struct interval_tree_node *node, struct rb_root *root); +interval_tree_remove(struct interval_tree_node *node, + struct rb_root_cached *root); extern struct interval_tree_node * -interval_tree_iter_first(struct rb_root *root, +interval_tree_iter_first(struct rb_root_cached *root, unsigned long start, unsigned long last); extern struct interval_tree_node * diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h index 58370e1862ad..f096423c8cbd 100644 --- a/include/linux/interval_tree_generic.h +++ b/include/linux/interval_tree_generic.h @@ -65,11 +65,13 @@ RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ \ /* Insert / remove interval nodes from the tree */ \ \ -ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \ +ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \ + struct rb_root_cached *root) \ { \ - struct rb_node **link = &root->rb_node, *rb_parent = NULL; \ + struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \ ITTYPE start = ITSTART(node), last = ITLAST(node); \ ITSTRUCT *parent; \ + bool leftmost = true; \ \ while (*link) { \ rb_parent = *link; \ @@ -78,18 +80,22 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \ parent->ITSUBTREE = last; \ if (start < ITSTART(parent)) \ link = &parent->ITRB.rb_left; \ - else \ + else { \ link = &parent->ITRB.rb_right; \ + leftmost = false; \ + } \ } \ \ node->ITSUBTREE = last; \ rb_link_node(&node->ITRB, rb_parent, link); \ - rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ + rb_insert_augmented_cached(&node->ITRB, root, \ + leftmost, &ITPREFIX ## _augment); \ } \ \ -ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \ +ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \ + struct rb_root_cached *root) \ { \ - rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ + rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \ } \ \ /* \ @@ -140,15 +146,35 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, 
ITTYPE start, ITTYPE last) \ } \ \ ITSTATIC ITSTRUCT * \ -ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \ +ITPREFIX ## _iter_first(struct rb_root_cached *root, \ + ITTYPE start, ITTYPE last) \ { \ - ITSTRUCT *node; \ + ITSTRUCT *node, *leftmost; \ \ - if (!root->rb_node) \ + if (!root->rb_root.rb_node) \ return NULL; \ - node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \ + \ + /* \ + * Fastpath range intersection/overlap between A: [a0, a1] and \ + * B: [b0, b1] is given by: \ + * \ + * a0 <= b1 && b0 <= a1 \ + * \ + * ... where A holds the lock range and B holds the smallest \ + * 'start' and largest 'last' in the tree. For the later, we \ + * rely on the root node, which by augmented interval tree \ + * property, holds the largest value in its last-in-subtree. \ + * This allows mitigating some of the tree walk overhead for \ + * for non-intersecting ranges, maintained and consulted in O(1). \ + */ \ + node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \ if (node->ITSUBTREE < start) \ return NULL; \ + \ + leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \ + if (ITSTART(leftmost) > last) \ + return NULL; \ + \ return ITPREFIX ## _subtree_search(node, start, last); \ } \ \ diff --git a/include/linux/mm.h b/include/linux/mm.h index 5195e272fc4a..f8c10d336e42 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2034,13 +2034,13 @@ extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); /* interval_tree.c */ void vma_interval_tree_insert(struct vm_area_struct *node, - struct rb_root *root); + struct rb_root_cached *root); void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *prev, - struct rb_root *root); + struct rb_root_cached *root); void vma_interval_tree_remove(struct vm_area_struct *node, - struct rb_root *root); -struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root, + struct rb_root_cached *root); +struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, unsigned long start, unsigned long last); struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, unsigned long start, unsigned long last); @@ -2050,11 +2050,12 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, vma; vma = vma_interval_tree_iter_next(vma, start, last)) void anon_vma_interval_tree_insert(struct anon_vma_chain *node, - struct rb_root *root); + struct rb_root_cached *root); void anon_vma_interval_tree_remove(struct anon_vma_chain *node, - struct rb_root *root); -struct anon_vma_chain *anon_vma_interval_tree_iter_first( - struct rb_root *root, unsigned long start, unsigned long last); + struct rb_root_cached *root); +struct anon_vma_chain * +anon_vma_interval_tree_iter_first(struct rb_root_cached *root, + unsigned long start, unsigned long last); struct anon_vma_chain *anon_vma_interval_tree_iter_next( struct anon_vma_chain *node, unsigned long start, unsigned long last); #ifdef CONFIG_DEBUG_VM_RB diff --git a/include/linux/rmap.h b/include/linux/rmap.h index f8ca2e74b819..733d3d8181e2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -55,7 +55,9 @@ struct anon_vma { * is serialized by a system wide lock only visible to * mm_take_all_locks() (mm_all_locks_mutex). 
*/ - struct rb_root rb_root; /* Interval tree of private "related" vmas */ + + /* Interval tree of private "related" vmas */ + struct rb_root_cached rb_root; }; /* diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index fb67554aabd6..5eb7f5bc8248 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -111,22 +111,25 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt, void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bound); -void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root); -void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root); +void rbt_ib_umem_insert(struct umem_odp_node *node, + struct rb_root_cached *root); +void rbt_ib_umem_remove(struct umem_odp_node *node, + struct rb_root_cached *root); typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end, void *cookie); /* * Call the callback on each ib_umem in the range. Returns the logical or of * the return values of the functions called. */ -int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end, +int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root, + u64 start, u64 end, umem_call_back cb, void *cookie); /* * Find first region intersecting with address range. * Return NULL if not found */ -struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root, +struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length); static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e6df68048517..bdb1279a415b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1457,7 +1457,7 @@ struct ib_ucontext { struct pid *tgid; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - struct rb_root umem_tree; + struct rb_root_cached umem_tree; /* * Protects .umem_rbroot and tree, as well as odp_mrs_count and * mmu notifiers registration. 
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c index df495fe81421..0e343fd29570 100644 --- a/lib/interval_tree_test.c +++ b/lib/interval_tree_test.c @@ -19,14 +19,14 @@ __param(bool, search_all, false, "Searches will iterate all nodes in the tree"); __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint"); -static struct rb_root root = RB_ROOT; +static struct rb_root_cached root = RB_ROOT_CACHED; static struct interval_tree_node *nodes = NULL; static u32 *queries = NULL; static struct rnd_state rnd; static inline unsigned long -search(struct rb_root *root, unsigned long start, unsigned long last) +search(struct rb_root_cached *root, unsigned long start, unsigned long last) { struct interval_tree_node *node; unsigned long results = 0; diff --git a/mm/interval_tree.c b/mm/interval_tree.c index f2c2492681bf..b47664358796 100644 --- a/mm/interval_tree.c +++ b/mm/interval_tree.c @@ -28,7 +28,7 @@ INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb, /* Insert node immediately after prev in the interval tree */ void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *prev, - struct rb_root *root) + struct rb_root_cached *root) { struct rb_node **link; struct vm_area_struct *parent; @@ -55,7 +55,7 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node, node->shared.rb_subtree_last = last; rb_link_node(&node->shared.rb, &parent->shared.rb, link); - rb_insert_augmented(&node->shared.rb, root, + rb_insert_augmented(&node->shared.rb, &root->rb_root, &vma_interval_tree_augment); } @@ -74,7 +74,7 @@ INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last, static inline, __anon_vma_interval_tree) void anon_vma_interval_tree_insert(struct anon_vma_chain *node, - struct rb_root *root) + struct rb_root_cached *root) { #ifdef CONFIG_DEBUG_VM_RB node->cached_vma_start = avc_start_pgoff(node); @@ -84,13 +84,13 @@ void anon_vma_interval_tree_insert(struct anon_vma_chain *node, } void anon_vma_interval_tree_remove(struct anon_vma_chain *node, - struct rb_root *root) + struct rb_root_cached *root) { __anon_vma_interval_tree_remove(node, root); } struct anon_vma_chain * -anon_vma_interval_tree_iter_first(struct rb_root *root, +anon_vma_interval_tree_iter_first(struct rb_root_cached *root, unsigned long first, unsigned long last) { return __anon_vma_interval_tree_iter_first(root, first, last); diff --git a/mm/memory.c b/mm/memory.c index 0bbc1d612a63..ec4e15494901 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2761,7 +2761,7 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma, zap_page_range_single(vma, start_addr, end_addr - start_addr, details); } -static inline void unmap_mapping_range_tree(struct rb_root *root, +static inline void unmap_mapping_range_tree(struct rb_root_cached *root, struct zap_details *details) { struct vm_area_struct *vma; @@ -2825,7 +2825,7 @@ void unmap_mapping_range(struct address_space *mapping, details.last_index = ULONG_MAX; i_mmap_lock_write(mapping); - if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) + if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) unmap_mapping_range_tree(&mapping->i_mmap, &details); i_mmap_unlock_write(mapping); } diff --git a/mm/mmap.c b/mm/mmap.c index 4c5981651407..680506faceae 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -685,7 +685,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; struct address_space *mapping = NULL; - 
struct rb_root *root = NULL; + struct rb_root_cached *root = NULL; struct anon_vma *anon_vma = NULL; struct file *file = vma->vm_file; bool start_changed = false, end_changed = false; @@ -3340,7 +3340,7 @@ static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { - if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { + if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. @@ -3356,7 +3356,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) - &anon_vma->root->rb_root.rb_node)) + &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); } } @@ -3458,7 +3458,7 @@ out_unlock: static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { - if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { + if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. @@ -3472,7 +3472,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma) * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) - &anon_vma->root->rb_root.rb_node)) + &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } diff --git a/mm/rmap.c b/mm/rmap.c index 0618cd85b862..b874c4761e84 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -391,7 +391,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma) * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ - if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { + if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { anon_vma->parent->degree--; continue; } @@ -425,7 +425,7 @@ static void anon_vma_ctor(void *data) init_rwsem(&anon_vma->rwsem); atomic_set(&anon_vma->refcount, 0); - anon_vma->rb_root = RB_ROOT; + anon_vma->rb_root = RB_ROOT_CACHED; } void __init anon_vma_init(void) -- cgit v1.2.3 From aebc5e6f50f770ec9392c3ca804f18b30797dfa7 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 6 Sep 2017 16:55:16 +0200 Subject: drm/amdgpu: rework amdgpu_cs_find_mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the VM instead of the BO list to find the BO for a virtual address. This fixes UVD/VCE in physical mode with VM local BOs. 
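To make the new lookup path concrete, here is a minimal sketch of what a caller effectively does after this rework (illustrative only: example_resolve_va is a made-up name, error handling is simplified, and the reservation double-check and amdgpu_ttm_bind() step from the real patch are omitted):

static int example_resolve_va(struct amdgpu_cs_parser *parser,
			      uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_bo_va_mapping *mapping;

	/* The VA interval tree works in GPU page units */
	addr /= AMDGPU_GPU_PAGE_SIZE;

	/* One interval-tree lookup replaces the old linear scan over
	 * parser->bo_list, so VM local BOs (which are never on the
	 * BO list) can be found as well.
	 */
	mapping = amdgpu_vm_bo_lookup_mapping(&fpriv->vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	return 0;
}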
Signed-off-by: Christian König Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 42 +++++++----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 ++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 ++ 4 files changed, 30 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c30110a3024a..5f19227b35e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1479,46 +1479,24 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **map) { + struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va_mapping *mapping; - unsigned i; int r; - if (!parser->bo_list) - return 0; - addr /= AMDGPU_GPU_PAGE_SIZE; - for (i = 0; i < parser->bo_list->num_entries; i++) { - struct amdgpu_bo_list_entry *lobj; - - lobj = &parser->bo_list->array[i]; - if (!lobj->bo_va) - continue; - - list_for_each_entry(mapping, &lobj->bo_va->valids, list) { - if (mapping->start > addr || - addr > mapping->last) - continue; - - *bo = lobj->bo_va->base.bo; - *map = mapping; - goto found; - } - - list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { - if (mapping->start > addr || - addr > mapping->last) - continue; + mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); + if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) + return -EINVAL; - *bo = lobj->bo_va->base.bo; - *map = mapping; - goto found; - } - } + *bo = mapping->bo_va->base.bo; + *map = mapping; - return -EINVAL; + /* Double check that the BO is reserved by this CS */ + if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket) + return -EINVAL; -found: r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem); if (unlikely(r)) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 42492e63b3a2..a4891bea2ca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -35,6 +35,7 @@ /* bo virtual addresses in a vm */ struct amdgpu_bo_va_mapping { + struct amdgpu_bo_va *bo_va; struct list_head list; struct rb_node rb; uint64_t start; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 545531db66db..758bbb9e77f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2086,6 +2086,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo *bo = bo_va->base.bo; + mapping->bo_va = bo_va; list_add(&mapping->list, &bo_va->invalids); amdgpu_vm_it_insert(mapping, &vm->va); @@ -2263,6 +2264,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); + mapping->bo_va = NULL; trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (valid) @@ -2348,6 +2350,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, if (tmp->last > eaddr) tmp->last = eaddr; + tmp->bo_va = NULL; list_add(&tmp->list, &vm->freed); trace_amdgpu_vm_bo_unmap(NULL, tmp); } @@ -2373,6 +2376,19 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, return 0; } +/** + * amdgpu_vm_bo_lookup_mapping - find mapping by address + * + * @vm: the requested VM + * + * Find a mapping 
by it's address. + */ +struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, + uint64_t addr) +{ + return amdgpu_vm_it_iter_first(&vm->va, addr, addr); +} + /** * amdgpu_vm_bo_rmv - remove a bo to a specific vm * @@ -2398,6 +2414,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); + mapping->bo_va = NULL; trace_amdgpu_vm_bo_unmap(bo_va, mapping); list_add(&mapping->list, &vm->freed); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 90b7741d024b..c1accd15efc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -276,6 +276,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size); +struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, + uint64_t addr); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, -- cgit v1.2.3 From d5884513a31df072879c89c80306d544467ee770 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 8 Sep 2017 14:09:41 +0200 Subject: drm/amdgpu: fix VM sync with always valid BOs v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All users of a VM must always wait for updates with always valid BOs to be completed. v2: remove debugging leftovers, rename struct member Signed-off-by: Christian König Reviewed-by: Roger He Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 ++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 3 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5f19227b35e9..ff61073b7181 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -761,10 +761,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update); - if (r) - return r; - r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; @@ -819,6 +815,12 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync); + if (r) + return r; + + r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update); + if (r) + return r; if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 758bbb9e77f3..64baa3138965 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1141,9 +1141,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, goto error_free; amdgpu_bo_fence(parent->base.bo, fence, true); - dma_fence_put(vm->last_dir_update); - vm->last_dir_update = dma_fence_get(fence); - dma_fence_put(fence); + dma_fence_put(vm->last_update); + vm->last_update = fence; } } @@ -1804,6 +1803,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, trace_amdgpu_vm_bo_mapping(mapping); } + if (bo_va->base.bo && + bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) { 
+ dma_fence_put(vm->last_update); + vm->last_update = dma_fence_get(bo_va->last_pt_update); + } + return 0; } @@ -2587,7 +2592,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update ? "CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)), "CPU update of VM recommended only for large BAR system\n"); - vm->last_dir_update = NULL; + vm->last_update = NULL; flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CLEARED; @@ -2693,7 +2698,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } amdgpu_vm_free_levels(&vm->root); - dma_fence_put(vm->last_dir_update); + dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vm_free_reserved_vmid(adev, vm, i); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index c1accd15efc8..cb6a6222fc3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -140,7 +140,7 @@ struct amdgpu_vm { /* contains the page directory */ struct amdgpu_vm_pt root; - struct dma_fence *last_dir_update; + struct dma_fence *last_update; /* protecting freed */ spinlock_t freed_lock; -- cgit v1.2.3 From 4e55eb3879fea6d8c7d414cebaa5bff1da58b4a1 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 11 Sep 2017 16:54:59 +0200 Subject: drm/amdgpu: fix amdgpu_vm_handle_moved as well v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no guarantee that the last BO_VA actually needed an update. In addition to that, all command submissions must wait for moved BOs to be cleared, not just the first one. v2: Don't overwrite any newer fence. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24 ++++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +-- 3 files changed, 12 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ff61073b7181..9f1202a4182f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -814,7 +814,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync); + r = amdgpu_vm_handle_moved(adev, vm); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 64baa3138965..2df254cc802e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, dma_addr_t *pages_addr = NULL; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; - struct dma_fence *exclusive; + struct dma_fence *exclusive, **last_update; uint64_t flags; int r; @@ -1769,6 +1769,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, else flags = 0x0; + if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)) + last_update = &vm->last_update; + else + last_update = &bo_va->last_pt_update; + if (!clear && bo_va->base.moved) { bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); @@ -1780,7 +1785,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, 
flags, nodes, - &bo_va->last_pt_update); + last_update); if (r) return r; } @@ -1803,12 +1808,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, trace_amdgpu_vm_bo_mapping(mapping); } - if (bo_va->base.bo && - bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) { - dma_fence_put(vm->last_update); - vm->last_update = dma_fence_get(bo_va->last_pt_update); - } - return 0; } @@ -2006,15 +2005,15 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, * PTs have to be reserved! */ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_sync *sync) + struct amdgpu_vm *vm) { - struct amdgpu_bo_va *bo_va = NULL; bool clear; int r = 0; spin_lock(&vm->status_lock); while (!list_empty(&vm->moved)) { + struct amdgpu_bo_va *bo_va; + bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); @@ -2030,9 +2029,6 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, } spin_unlock(&vm->status_lock); - if (bo_va) - r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index cb6a6222fc3f..48c58ae4bb3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -250,8 +250,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); int amdgpu_vm_handle_moved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_sync *sync); + struct amdgpu_vm *vm); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); -- cgit v1.2.3 From 02208441cc3a5110191996bb129db39ff10e7395 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 25 Aug 2017 20:40:26 -0400 Subject: drm/amdgpu: Add PASID management Allows assigning a PASID to a VM for identifying VMs involved in page faults. The global PASID manager is also exported in the KFD interface so that AMDGPU and KFD can share the PASID space. PASIDs of different sizes can be requested. On APUs, the PASID size is determined by the capabilities of the IOMMU. So KFD must be able to allocate PASIDs in a smaller range. 
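For context, the intended calling pattern can be sketched like this (illustrative only: example_create_vm_with_pasid is a made-up name, the 16-bit width is an assumed IOMMU limit, and error handling is simplified):

static int example_create_vm_with_pasid(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
	int pasid, r;

	/* Request a PASID of at most 16 bits, e.g. for an APU whose
	 * IOMMU only supports 16-bit PASIDs. The allocator tries the
	 * top half of the requested range first, keeping small PASIDs
	 * free for clients with narrower limits.
	 */
	pasid = amdgpu_vm_alloc_pasid(16);
	if (pasid < 0)
		return pasid;	/* -EINVAL, -ENOSPC or -ENOMEM */

	/* The PASID is registered in pasid_idr so interrupt handlers
	 * can map a page fault back to this VM.
	 */
	r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		amdgpu_vm_free_pasid(pasid);
	return r;
}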
Signed-off-by: Felix Kuehling Acked-by: Alex Deucher Reviewed-by: Oded Gabbay Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 75 ++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14 ++++- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 6 ++ 6 files changed, 97 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b9dbbf9cb8b0..dc7e25cce741 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = amdgpu_vm_alloc_pasid, + .free_pasid = amdgpu_vm_free_pasid, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 309f2419c6d8..c678c69936a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = amdgpu_vm_alloc_pasid, + .free_pasid = amdgpu_vm_free_pasid, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e16229000a98..79d9ab43d42c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -825,7 +825,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) } r = amdgpu_vm_init(adev, &fpriv->vm, - AMDGPU_VM_CONTEXT_GFX); + AMDGPU_VM_CONTEXT_GFX, 0); if (r) { kfree(fpriv); goto out_suspend; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2196bca7331c..9b795915cab1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -27,11 +27,58 @@ */ #include #include +#include #include #include #include "amdgpu.h" #include "amdgpu_trace.h" +/* + * PASID manager + * + * PASIDs are global address space identifiers that can be shared + * between the GPU, an IOMMU and the driver. VMs on different devices + * may use the same PASID if they share the same address + * space. Therefore PASIDs are allocated using a global IDA. VMs are + * looked up from the PASID per amdgpu_device. + */ +static DEFINE_IDA(amdgpu_vm_pasid_ida); + +/** + * amdgpu_vm_alloc_pasid - Allocate a PASID + * @bits: Maximum width of the PASID in bits, must be at least 1 + * + * Allocates a PASID of the given width while keeping smaller PASIDs + * available if possible. + * + * Returns a positive integer on success. Returns %-EINVAL if bits==0. + * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on + * memory allocation failure. 
+ */ +int amdgpu_vm_alloc_pasid(unsigned int bits) +{ + int pasid = -EINVAL; + + for (bits = min(bits, 31U); bits > 0; bits--) { + pasid = ida_simple_get(&amdgpu_vm_pasid_ida, + 1U << (bits - 1), 1U << bits, + GFP_KERNEL); + if (pasid != -ENOSPC) + break; + } + + return pasid; +} + +/** + * amdgpu_vm_free_pasid - Free a PASID + * @pasid: PASID to free + */ +void amdgpu_vm_free_pasid(unsigned int pasid) +{ + ida_simple_remove(&amdgpu_vm_pasid_ida, pasid); +} + /* * GPUVM * GPUVM is similar to the legacy gart on older asics, however @@ -2539,7 +2586,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_ * Init @vm fields. */ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context) + int vm_context, unsigned int pasid) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT(adev) * 8); @@ -2620,6 +2667,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_root; } + if (pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, + GFP_ATOMIC); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + if (r < 0) + goto error_free_root; + + vm->pasid = pasid; + } + return 0; error_free_root: @@ -2673,6 +2733,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; int i; + if (vm->pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + } + amd_sched_entity_fini(vm->entity.sched, &vm->entity); if (!RB_EMPTY_ROOT(&vm->va)) { @@ -2752,6 +2820,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) adev->vm_manager.vm_update_mode = 0; #endif + idr_init(&adev->vm_manager.pasid_idr); + spin_lock_init(&adev->vm_manager.pasid_lock); } /** @@ -2765,6 +2835,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) { unsigned i, j; + WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); + idr_destroy(&adev->vm_manager.pasid_idr); + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 48c58ae4bb3a..7873dfa8c0f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -25,6 +25,7 @@ #define __AMDGPU_VM_H__ #include +#include #include "gpu_scheduler.h" #include "amdgpu_sync.h" @@ -148,8 +149,9 @@ struct amdgpu_vm { /* Scheduler entity for page table updates */ struct amd_sched_entity entity; - /* client id */ + /* client id and PASID (TODO: replace client_id with PASID) */ u64 client_id; + unsigned int pasid; /* dedicated to vm */ struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; @@ -220,12 +222,20 @@ struct amdgpu_vm_manager { * BIT1[= 0] Compute updated by SDMA [= 1] by CPU */ int vm_update_mode; + + /* PASID to VM mapping, will be used in interrupt context to + * look up VM of a page fault + */ + struct idr pasid_idr; + spinlock_t pasid_lock; }; +int amdgpu_vm_alloc_pasid(unsigned int bits); +void amdgpu_vm_free_pasid(unsigned int pasid); void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context); + int vm_context, 
unsigned int pasid); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 94277cb734d2..f516fd10e6ba 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -112,6 +112,9 @@ struct tile_config { * * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz * + * @alloc_pasid: Allocate a PASID + * @free_pasid: Free a PASID + * * @program_sh_mem_settings: A function that should initiate the memory * properties such as main aperture memory type (cache / non cached) and * secondary aperture base address, size and memory type. @@ -160,6 +163,9 @@ struct kfd2kgd_calls { uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd); + int (*alloc_pasid)(unsigned int bits); + void (*free_pasid)(unsigned int pasid); + /* Register access functions */ void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, -- cgit v1.2.3 From a2f14820e3493145c25095873d4a510a1b25efdc Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sat, 26 Aug 2017 02:43:06 -0400 Subject: drm/amdgpu: Track pending retry faults in IH and VM (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit IH tracks pending retry faults in a hash table for fast lookup in interrupt context. Each VM has a short FIFO of pending VM faults for processing in a bottom half. The IH prescreening stage adds retry faults and filters out repeated retry interrupts to minimize the impact of interrupt storms. It's the VM's responsibility to remove pending faults once they are handled. For now this is only done when the VM is destroyed. v2: - Made the hash table smaller and the FIFO longer. I never want the FIFO to fill up, because that would make prescreen take longer. 128 pending page faults should be enough to keep migrations busy. Signed-off-by: Felix Kuehling Acked-by: Christian König (v1) Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 76 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 12 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 +++ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 78 +++++++++++++++++++++++++++++++++- 6 files changed, 180 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1989c276138c..7fb8492d8e63 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -184,6 +184,7 @@ config DRM_AMDGPU select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_LCD_SUPPORT select INTERVAL_TREE + select CHASH help Choose this option if you have a recent AMD Radeon graphics card. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index c834a40cfad6..f5f27e4f0f7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -196,3 +196,79 @@ restart_ih: return IRQ_HANDLED; } + +/** + * amdgpu_ih_add_fault - Add a page fault record + * + * @adev: amdgpu device pointer + * @key: 64-bit encoding of PASID and address + * + * This should be called when a retry page fault interrupt is + * received. 
If this is a new page fault, it will be added to a hash + * table. The return value indicates whether this is a new fault, or + * a fault that was already known and is already being handled. + * + * If there are too many pending page faults, this will fail. Retry + * interrupts should be ignored in this case until there is enough + * free space. + * + * Returns 0 if the fault was added, 1 if the fault was already known, + * -ENOSPC if there are too many pending faults. + */ +int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key) +{ + unsigned long flags; + int r = -ENOSPC; + + if (WARN_ON_ONCE(!adev->irq.ih.faults)) + /* Should be allocated in _ih_sw_init on GPUs that + * support retry faults and require retry filtering. + */ + return r; + + spin_lock_irqsave(&adev->irq.ih.faults->lock, flags); + + /* Only let the hash table fill up to 50% for best performance */ + if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1))) + goto unlock_out; + + r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL); + if (!r) + adev->irq.ih.faults->count++; + + /* chash_table_copy_in should never fail unless we're losing count */ + WARN_ON_ONCE(r < 0); + +unlock_out: + spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags); + return r; +} + +/** + * amdgpu_ih_clear_fault - Remove a page fault record + * + * @adev: amdgpu device pointer + * @key: 64-bit encoding of PASID and address + * + * This should be called when a page fault has been handled. Any + * future interrupt with this key will be processed as a new + * page fault. + */ +void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key) +{ + unsigned long flags; + int r; + + if (!adev->irq.ih.faults) + return; + + spin_lock_irqsave(&adev->irq.ih.faults->lock, flags); + + r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL); + if (!WARN_ON_ONCE(r < 0)) { + adev->irq.ih.faults->count--; + WARN_ON_ONCE(adev->irq.ih.faults->count < 0); + } + + spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 3de8e74e5b3a..ada89358e220 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -24,6 +24,8 @@ #ifndef __AMDGPU_IH_H__ #define __AMDGPU_IH_H__ +#include + struct amdgpu_device; /* * vega10+ IH clients @@ -69,6 +71,13 @@ enum amdgpu_ih_clientid #define AMDGPU_IH_CLIENTID_LEGACY 0 +#define AMDGPU_PAGEFAULT_HASH_BITS 8 +struct amdgpu_retryfault_hashtable { + DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); + spinlock_t lock; + int count; +}; + /* * R6xx+ IH ring */ @@ -87,6 +96,7 @@ struct amdgpu_ih_ring { bool use_doorbell; bool use_bus_addr; dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ + struct amdgpu_retryfault_hashtable *faults; }; #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4 @@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev); int amdgpu_ih_process(struct amdgpu_device *adev); +int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key); +void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9b795915cab1..6c1133298b17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2680,6 +2680,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->pasid 
= pasid; } + INIT_KFIFO(vm->faults); + return 0; error_free_root: @@ -2731,8 +2733,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; + u64 fault; int i; + /* Clear pending page faults from IH when the VM is destroyed */ + while (kfifo_get(&vm->faults, &fault)) + amdgpu_ih_clear_fault(adev, fault); + if (vm->pasid) { unsigned long flags; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 7873dfa8c0f9..447ed6e7e586 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -120,6 +120,10 @@ struct amdgpu_vm_pt { unsigned last_entry_used; }; +#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr)) +#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48) +#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL) + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root va; @@ -160,6 +164,9 @@ struct amdgpu_vm { /* Flag to indicate ATS support from PTE for GFX9 */ bool pte_support_ats; + + /* Up to 128 pending page faults */ + DECLARE_KFIFO(faults, u64, 128); }; struct amdgpu_vm_id { diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index eda4771e273f..dd6af2176d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -235,8 +235,73 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) */ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev) { - /* TODO: Filter known pending page faults */ + u32 ring_index = adev->irq.ih.rptr >> 2; + u32 dw0, dw3, dw4, dw5; + u16 pasid; + u64 addr, key; + struct amdgpu_vm *vm; + int r; + + dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]); + dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]); + + /* Filter retry page faults, let only the first one pass. If + * there are too many outstanding faults, ignore them until + * some faults get cleared. + */ + switch (dw0 & 0xff) { + case AMDGPU_IH_CLIENTID_VMC: + case AMDGPU_IH_CLIENTID_UTCL2: + break; + default: + /* Not a VM fault */ + return true; + } + + /* Not a retry fault */ + if (!(dw5 & 0x80)) + return true; + + pasid = dw3 & 0xffff; + /* No PASID, can't identify faulting process */ + if (!pasid) + return true; + + addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12); + key = AMDGPU_VM_FAULT(pasid, addr); + r = amdgpu_ih_add_fault(adev, key); + + /* Hash table is full or the fault is already being processed, + * ignore further page faults + */ + if (r != 0) + goto ignore_iv; + + /* Track retry faults in per-VM fault FIFO. */ + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + spin_unlock(&adev->vm_manager.pasid_lock); + if (WARN_ON_ONCE(!vm)) { + /* VM not found, process it normally */ + amdgpu_ih_clear_fault(adev, key); + return true; + } + /* No locking required with single writer and single reader */ + r = kfifo_put(&vm->faults, key); + if (!r) { + /* FIFO is full. 
Ignore it until there is space */ + amdgpu_ih_clear_fault(adev, key); + goto ignore_iv; + } + + /* It's the first fault for this address, process it normally */ return true; + +ignore_iv: + adev->irq.ih.rptr += 32; + return false; } /** @@ -323,6 +388,14 @@ static int vega10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1; + adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL); + if (!adev->irq.ih.faults) + return -ENOMEM; + INIT_CHASH_TABLE(adev->irq.ih.faults->hash, + AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); + spin_lock_init(&adev->irq.ih.faults->lock); + adev->irq.ih.faults->count = 0; + r = amdgpu_irq_init(adev); return r; @@ -335,6 +408,9 @@ static int vega10_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); + kfree(adev->irq.ih.faults); + adev->irq.ih.faults = NULL; + return 0; } -- cgit v1.2.3 From c98171ccf6580407d07a3b5dc8188ce9e1f4f7ca Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 21 Sep 2017 16:26:41 -0400 Subject: drm/amdgpu: Handle GPUVM fault storms When many wavefronts cause VM faults at the same time, they can overwhelm the interrupt handler and cause IH ring overflows before the driver can notify or kill the faulting application. As a workaround I'm introducing a limited per-VM fault credit (16 faults). Once that credit is used up, further VM faults are filtered out at the prescreen stage of processing. This depends on the PASID in the interrupt packet, so it currently only works for KFD contexts. Signed-off-by: Felix Kuehling Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 31 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 ++++++- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 19 +++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/cz_ih.c | 19 +++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 19 +++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 19 +++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 11 +++++++---- 7 files changed, 112 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8fcc743dfa86..c91d5c7a273d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2682,6 +2682,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, } INIT_KFIFO(vm->faults); + vm->fault_credit = 16; return 0; @@ -2775,6 +2776,36 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_reserved_vmid(adev, vm, i); } +/** + * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID + * + * @adev: amdgpu_device pointer + * @pasid: PASID to identify the VM + * + * This function is expected to be called in interrupt context. Returns + * true if there was fault credit, false otherwise + */ +bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, + unsigned int pasid) +{ + struct amdgpu_vm *vm; + + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + spin_unlock(&adev->vm_manager.pasid_lock); + if (!vm) + /* VM not found, can't track fault credit */ + return true; + + /* No lock needed.
only accessed by IRQ handler */ + if (!vm->fault_credit) + /* Too many faults in this VM */ + return false; + + vm->fault_credit--; + return true; +} + /** * amdgpu_vm_manager_init - init the VM manager * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 447ed6e7e586..66efbc2e43af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -165,8 +165,11 @@ struct amdgpu_vm { /* Flag to indicate ATS support from PTE for GFX9 */ bool pte_support_ats; - /* Up to 128 pending page faults */ + /* Up to 128 pending retry page faults */ DECLARE_KFIFO(faults, u64, 128); + + /* Limit non-retry fault storms */ + unsigned int fault_credit; }; struct amdgpu_vm_id { @@ -244,6 +247,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int vm_context, unsigned int pasid); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); +bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, + unsigned int pasid); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 07d3d895da10..a870b354e3f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -237,8 +237,23 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev) */ static bool cik_ih_prescreen_iv(struct amdgpu_device *adev) { - /* Process all interrupts */ - return true; + u32 ring_index = adev->irq.ih.rptr >> 2; + u16 pasid; + + switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { + case 146: + case 147: + pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; + if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) + return true; + break; + default: + /* Not a VM fault */ + return true; + } + + adev->irq.ih.rptr += 16; + return false; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index b6cdf4afaf46..fa61d649bb44 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -216,8 +216,23 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev) */ static bool cz_ih_prescreen_iv(struct amdgpu_device *adev) { - /* Process all interrupts */ - return true; + u32 ring_index = adev->irq.ih.rptr >> 2; + u16 pasid; + + switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { + case 146: + case 147: + pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; + if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) + return true; + break; + default: + /* Not a VM fault */ + return true; + } + + adev->irq.ih.rptr += 16; + return false; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 65ed6d3a8f05..bd592cb39f37 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -216,8 +216,23 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) */ static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev) { - /* Process all interrupts */ - return true; + u32 ring_index = adev->irq.ih.rptr >> 2; + u16 pasid; + + switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { + case 146: + case 147: + pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; + if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) + return true; + break; + default: + /* Not a VM fault */ + return true; + } + 
+ adev->irq.ih.rptr += 16; + return false; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 5ed00692618e..aa4e320e31f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -227,8 +227,23 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) */ static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev) { - /* Process all interrupts */ - return true; + u32 ring_index = adev->irq.ih.rptr >> 2; + u16 pasid; + + switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { + case 146: + case 147: + pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; + if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) + return true; + break; + default: + /* Not a VM fault */ + return true; + } + + adev->irq.ih.rptr += 16; + return false; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index a3b30d84dbb3..697325737ba8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -260,15 +260,18 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev) return true; } - /* Not a retry fault */ - if (!(dw5 & 0x80)) - return true; - pasid = dw3 & 0xffff; /* No PASID, can't identify faulting process */ if (!pasid) return true; + /* Not a retry fault, check fault credit */ + if (!(dw5 & 0x80)) { + if (!amdgpu_vm_pasid_fault_credit(adev, pasid)) + goto ignore_iv; + return true; + } + addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12); key = AMDGPU_VM_FAULT(pasid, addr); r = amdgpu_ih_add_fault(adev, key); -- cgit v1.2.3 From 6d16dac85c081825af58111023428c43d1da7e1a Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Thu, 31 Aug 2017 15:55:00 -0400 Subject: drm/amdgpu: Set the correct value for PDEs/PTEs of ATC memory on Raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without the additional bits set in PDEs/PTEs, the ATC memory access would have failed on Raven. 
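For readers who want to see what this composite value amounts to, here is a minimal standalone sketch (illustration only, not part of the patch) that mirrors the AMDGPU_PTE_DEFAULT_ATC definition added below. The MTYPE shift of 57 is confirmed by the header hunk; the remaining bit positions are assumptions based on the amdgpu_vm.h flag layout of this era.

#include <stdint.h>
#include <stdio.h>

/* Assumed flag bit positions (not part of this patch) */
#define PTE_SYSTEM      (1ULL << 1)
#define PTE_SNOOPED     (1ULL << 2)
#define PTE_EXECUTABLE  (1ULL << 4)
#define PTE_READABLE    (1ULL << 5)
#define PTE_WRITEABLE   (1ULL << 6)
#define PTE_MTYPE(a)    ((uint64_t)(a) << 57) /* matches the hunk below */
#define MTYPE_CC        2                     /* cache coherent, per the patch */

int main(void)
{
	/* Mirrors AMDGPU_PTE_DEFAULT_ATC from the header hunk below */
	uint64_t atc = PTE_SYSTEM | PTE_SNOOPED | PTE_EXECUTABLE |
		       PTE_READABLE | PTE_WRITEABLE | PTE_MTYPE(MTYPE_CC);

	/* With the assumed positions this prints 0x400000000000076 */
	printf("AMDGPU_PTE_DEFAULT_ATC = 0x%llx\n", (unsigned long long)atc);
	return 0;
}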
Signed-off-by: Yong Zhao Acked-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 9 ++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 10 ++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index fee0a32ac56f..b500bb6a8491 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -328,9 +328,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_SHADOW); if (vm->pte_support_ats) { - init_value = AMDGPU_PTE_SYSTEM; + init_value = AMDGPU_PTE_DEFAULT_ATC; if (level != adev->vm_manager.num_level - 1) init_value |= AMDGPU_PDE_PTE; + } /* walk over the address space and allocate the page tables */ @@ -2017,7 +2018,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, list_del(&mapping->list); if (vm->pte_support_ats) - init_pte_value = AMDGPU_PTE_SYSTEM; + init_pte_value = AMDGPU_PTE_DEFAULT_ATC; r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, mapping->start, mapping->last, @@ -2629,7 +2630,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (adev->asic_type == CHIP_RAVEN) { vm->pte_support_ats = true; - init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; + init_pde_value = AMDGPU_PTE_DEFAULT_ATC + | AMDGPU_PDE_PTE; + } } else vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index d68f39b4e5e7..aa914256b4bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -73,6 +73,16 @@ struct amdgpu_bo_list_entry; #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) +/* For Raven */ +#define AMDGPU_MTYPE_CC 2 + +#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \ + | AMDGPU_PTE_SNOOPED \ + | AMDGPU_PTE_EXECUTABLE \ + | AMDGPU_PTE_READABLE \ + | AMDGPU_PTE_WRITEABLE \ + | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC)) + /* How to programm VM fault handling */ #define AMDGPU_VM_FAULT_STOP_NEVER 0 #define AMDGPU_VM_FAULT_STOP_FIRST 1 -- cgit v1.2.3 From ff4cd38943199348a53f980709e16f0bc5c0b8c9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 6 Nov 2017 15:25:37 +0100 Subject: drm/amdgpu: make AMDGPU_VA_RESERVED_SIZE 64bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even when it's small, handle it as a 64bit value as well.
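Why the ULL suffix matters is easiest to see in a small standalone sketch (illustration only, not from the patch): a constant built with a plain int shift stays an int, which invites undefined behaviour once the shift reaches the sign bit and forces narrowing casts when printed, while the ULL form is a well-defined 64-bit value everywhere.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 8 << 20 still fits in an int, so the old constant happened to
	 * work, but the pattern is fragile: a larger reservation such as
	 * (8 << 28) would overflow int, which is undefined behaviour.
	 */
	uint64_t reserved = 8ULL << 20;  /* well-defined 64-bit constant */
	uint64_t va = 0x12345;           /* hypothetical example address */

	if (va < reserved)
		printf("va_address 0x%llx is in reserved area 0x%llx\n",
		       (unsigned long long)va,
		       (unsigned long long)reserved);
	return 0;
}

This is also why the gem.c hunk below switches the printk format from %lX with an unsigned long cast to %LX: once the constant is 64-bit, casting it (or the address) down to unsigned long would truncate on 32-bit kernels.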
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index fb72edc4c026..4710e51099c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -556,9 +556,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { dev_err(&dev->pdev->dev, - "va_address 0x%lX is in reserved area 0x%X\n", - (unsigned long)args->va_address, - AMDGPU_VA_RESERVED_SIZE); + "va_address 0x%LX is in reserved area 0x%LX\n", + args->va_address, AMDGPU_VA_RESERVED_SIZE); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index aa914256b4bc..bae77353447b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry; #define AMDGPU_MMHUB 1 /* hardcode that limit for now */ -#define AMDGPU_VA_RESERVED_SIZE (8 << 20) +#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20) + /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 -- cgit v1.2.3
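As a closing illustration, the retry-fault filtering added above hinges on packing a (PASID, page address) pair into one u64 key that serves both the IH hash table and the per-VM kfifo. The sketch below is standalone and simply mirrors the AMDGPU_VM_FAULT macros from the patches above, showing that both fields round-trip cleanly:

#include <stdint.h>
#include <stdio.h>

/* Mirrors AMDGPU_VM_FAULT{,_PASID,_ADDR} from amdgpu_vm.h above: the
 * 16-bit PASID lives in bits 63:48, the page-aligned faulting address
 * (bits 47:12) in the low bits, so the two fields never overlap.
 */
#define VM_FAULT_KEY(pasid, addr) (((uint64_t)(pasid) << 48) | (addr))
#define VM_FAULT_PASID(key)       ((uint64_t)(key) >> 48)
#define VM_FAULT_ADDR(key)        ((uint64_t)(key) & 0xfffffffff000ULL)

int main(void)
{
	uint16_t pasid = 0x8001;      /* hypothetical example PASID */
	uint64_t addr = 0x12345000;   /* page-aligned fault address */
	uint64_t key = VM_FAULT_KEY(pasid, addr);

	/* Prints key=0x8001000012345000 pasid=0x8001 addr=0x12345000 */
	printf("key=0x%llx pasid=0x%llx addr=0x%llx\n",
	       (unsigned long long)key,
	       (unsigned long long)VM_FAULT_PASID(key),
	       (unsigned long long)VM_FAULT_ADDR(key));
	return 0;
}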