Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  852
1 file changed, 602 insertions(+), 250 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c9b131b13ef7..ad5bf86ee8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -42,15 +42,23 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
+#include <linux/iommu.h>
#include "amdgpu.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem, unsigned num_pages,
+ uint64_t offset, unsigned window,
+ struct amdgpu_ring *ring,
+ uint64_t *addr);
+
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
/*
* Global memory.
*/
@@ -97,6 +105,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
goto error_bo;
}
+ mutex_init(&adev->mman.gtt_window_lock);
+
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
@@ -123,6 +133,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
if (adev->mman.mem_global_referenced) {
amd_sched_entity_fini(adev->mman.entity.sched,
&adev->mman.entity);
+ mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
adev->mman.mem_global_referenced = false;
@@ -150,7 +161,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_TT:
man->func = &amdgpu_gtt_mgr_func;
- man->gpu_offset = adev->mc.gtt_start;
+ man->gpu_offset = adev->mc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -186,12 +197,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
- unsigned i;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
@@ -200,29 +210,43 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (adev->mman.buffer_funcs &&
adev->mman.buffer_funcs_ring &&
adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ struct drm_mm_node *node = bo->mem.mm_node;
+ unsigned long pages_left;
+
+ for (pages_left = bo->mem.num_pages;
+ pages_left;
+ pages_left -= node->size, node++) {
+ if (node->start < fpfn)
+ break;
+ }
+
+ if (!pages_left)
+ goto gtt;
+
+ /* Try evicting to the CPU-inaccessible part of VRAM
+ * first, but only set GTT as the busy placement, so this
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM.
+ */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+ abo->placements[0].fpfn = fpfn;
+ abo->placements[0].lpfn = 0;
+ abo->placement.busy_placement = &abo->placements[1];
+ abo->placement.num_busy_placement = 1;
} else {
+gtt:
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- for (i = 0; i < abo->placement.num_placement; ++i) {
- if (!(abo->placements[i].flags &
- TTM_PL_FLAG_TT))
- continue;
-
- if (abo->placements[i].lpfn)
- continue;
-
- /* set an upper limit to force directly
- * allocating address space for the BO.
- */
- abo->placements[i].lpfn =
- adev->mc.gtt_size >> PAGE_SHIFT;
- }
}
break;
case TTM_PL_TT:
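Worth restating what the new VRAM eviction path above does: before falling back to GTT, the BO is first offered the CPU-invisible tail of VRAM (fpfn raised to the visible boundary), with GTT listed as the only busy placement, so that under pressure this BO spills to GTT instead of displacing others. If no backing node lies below the boundary, the BO is already entirely invisible and goes straight to GTT. A minimal standalone sketch of that visibility test, using a toy node type rather than the kernel's struct drm_mm_node:

	#include <stdbool.h>

	struct mm_node { unsigned long start, size; };	/* size in pages */

	/* num_pages must equal the sum of the node sizes, as TTM guarantees. */
	static bool any_node_cpu_visible(const struct mm_node *node,
					 unsigned long num_pages,
					 unsigned long visible_pfn)
	{
		unsigned long pages_left;

		for (pages_left = num_pages; pages_left;
		     pages_left -= node->size, node++) {
			if (node->start < visible_pfn)
				return true;	/* still CPU-visible */
		}
		return false;	/* already entirely in invisible VRAM */
	}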
@@ -234,7 +258,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
@@ -252,106 +276,185 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_mem_reg *mem,
- uint64_t *addr)
+static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem)
{
- int r;
-
- switch (mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, mem);
- if (r)
- return r;
+ uint64_t addr = 0;
- case TTM_PL_VRAM:
- *addr = mm_node->start << PAGE_SHIFT;
- *addr += bo->bdev->man[mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", mem->mem_type);
- return -EINVAL;
+ if (mem->mem_type != TTM_PL_TT ||
+ amdgpu_gtt_mgr_is_allocated(mem)) {
+ addr = mm_node->start << PAGE_SHIFT;
+ addr += bo->bdev->man[mem->mem_type].gpu_offset;
}
+ return addr;
+}
- return 0;
+/**
+ * amdgpu_find_mm_node - Helper that finds the drm_mm_node
+ * corresponding to @offset. It also adjusts @offset to be within
+ * the returned drm_mm_node.
+ */
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+ unsigned long *offset)
+{
+ struct drm_mm_node *mm_node = mem->mm_node;
+
+ while (*offset >= (mm_node->size << PAGE_SHIFT)) {
+ *offset -= (mm_node->size << PAGE_SHIFT);
+ ++mm_node;
+ }
+ return mm_node;
}
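Because a TTM memory region may be backed by several discontiguous drm_mm nodes, the helper above turns a linear byte offset into a (node, offset-within-node) pair, and several later hunks rely on it. A self-contained model of the walk, with a toy node type in place of struct drm_mm_node:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	struct mm_node { unsigned long start, size; };	/* size in pages */

	static struct mm_node *find_node(struct mm_node *node,
					 unsigned long *offset)
	{
		while (*offset >= (node->size << PAGE_SHIFT)) {
			*offset -= node->size << PAGE_SHIFT;
			++node;
		}
		return node;	/* *offset is now relative to this node */
	}

	int main(void)
	{
		struct mm_node nodes[] = { { 100, 4 }, { 300, 4 } };
		unsigned long off = 0x5000;	/* five pages in */
		struct mm_node *n = find_node(nodes, &off);

		/* prints "node 1, offset 0x1000" */
		printf("node %ld, offset 0x%lx\n", (long)(n - nodes), off);
		return 0;
	}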
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+/**
+ * amdgpu_copy_ttm_mem_to_mem - Helper function for copying
+ *
+ * The function copies @size bytes from {src->mem + src->offset} to
+ * {dst->mem + dst->offset}. src->bo and dst->bo may be the same BO
+ * for a move, or different BOs for a BO-to-BO copy.
+ *
+ * @f: Returns the last fence if multiple jobs are submitted.
+ */
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-
- struct drm_mm_node *old_mm, *new_mm;
- uint64_t old_start, old_size, new_start, new_size;
- unsigned long num_pages;
+ struct drm_mm_node *src_mm, *dst_mm;
+ uint64_t src_node_start, dst_node_start, src_node_size,
+ dst_node_size, src_page_offset, dst_page_offset;
struct dma_fence *fence = NULL;
- int r;
-
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ int r = 0;
+ const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- old_mm = old_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
- if (r)
- return r;
- old_size = old_mm->size;
+ src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
+ src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
+ src->offset;
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
+ dst->offset;
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
- new_mm = new_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
- if (r)
- return r;
- new_size = new_mm->size;
+ mutex_lock(&adev->mman.gtt_window_lock);
- num_pages = new_mem->num_pages;
- while (num_pages) {
- unsigned long cur_pages = min(old_size, new_size);
+ while (size) {
+ unsigned long cur_size;
+ uint64_t from = src_node_start, to = dst_node_start;
struct dma_fence *next;
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- cur_pages * PAGE_SIZE,
- bo->resv, &next, false);
+ /* A copy cannot exceed GTT_MAX_BYTES, so if src or dst
+ * begins at a page offset, shrink the size accordingly
+ */
+ cur_size = min3(min(src_node_size, dst_node_size), size,
+ GTT_MAX_BYTES);
+ if (cur_size + src_page_offset > GTT_MAX_BYTES ||
+ cur_size + dst_page_offset > GTT_MAX_BYTES)
+ cur_size -= max(src_page_offset, dst_page_offset);
+
+ /* Map only what needs to be accessed. Map src to window 0 and
+ * dst to window 1
+ */
+ if (src->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+ r = amdgpu_map_buffer(src->bo, src->mem,
+ PFN_UP(cur_size + src_page_offset),
+ src_node_start, 0, ring,
+ &from);
+ if (r)
+ goto error;
+ /* Adjust the offset because amdgpu_map_buffer returns
+ * the start of the mapped page
+ */
+ from += src_page_offset;
+ }
+
+ if (dst->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+ r = amdgpu_map_buffer(dst->bo, dst->mem,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_node_start, 1, ring,
+ &to);
+ if (r)
+ goto error;
+ to += dst_page_offset;
+ }
+
+ r = amdgpu_copy_buffer(ring, from, to, cur_size,
+ resv, &next, false, true);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
- num_pages -= cur_pages;
- if (!num_pages)
+ size -= cur_size;
+ if (!size)
break;
- old_size -= cur_pages;
- if (!old_size) {
- r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
- &old_start);
- if (r)
- goto error;
- old_size = old_mm->size;
+ src_node_size -= cur_size;
+ if (!src_node_size) {
+ src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
+ src->mem);
+ src_node_size = (src_mm->size << PAGE_SHIFT);
} else {
- old_start += cur_pages * PAGE_SIZE;
+ src_node_start += cur_size;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
}
-
- new_size -= cur_pages;
- if (!new_size) {
- r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
- &new_start);
- if (r)
- goto error;
-
- new_size = new_mm->size;
+ dst_node_size -= cur_size;
+ if (!dst_node_size) {
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
+ dst->mem);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT);
} else {
- new_start += cur_pages * PAGE_SIZE;
+ dst_node_start += cur_size;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
}
}
+error:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+ return r;
+}
+
+
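The copy loop above must honor three limits at once: the bytes remaining in the current source node, the bytes remaining in the current destination node, and the per-window transfer cap. When either side starts mid-page, the mapped window also has to cover that leading offset, so the chunk shrinks by the larger of the two page offsets. A standalone restatement of just that computation (max_bytes stands in for GTT_MAX_BYTES):

	#include <stdint.h>

	static uint64_t min3_u64(uint64_t a, uint64_t b, uint64_t c)
	{
		uint64_t m = a < b ? a : b;

		return m < c ? m : c;
	}

	static uint64_t chunk_size(uint64_t src_node_left,
				   uint64_t dst_node_left,
				   uint64_t size, uint64_t max_bytes,
				   uint64_t src_page_off,
				   uint64_t dst_page_off)
	{
		uint64_t cur = min3_u64(src_node_left < dst_node_left ?
					src_node_left : dst_node_left,
					size, max_bytes);

		/* The window must also hold the leading page offset. */
		if (cur + src_page_off > max_bytes ||
		    cur + dst_page_off > max_bytes)
			cur -= src_page_off > dst_page_off ?
			       src_page_off : dst_page_off;
		return cur;
	}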
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_copy_mem src, dst;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ src.bo = bo;
+ dst.bo = bo;
+ src.mem = old_mem;
+ dst.mem = new_mem;
+ src.offset = 0;
+ dst.offset = 0;
+
+ r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+ new_mem->num_pages << PAGE_SHIFT,
+ bo->resv, &fence);
+ if (r)
+ goto error;
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
@@ -384,7 +487,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+ placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -431,7 +534,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+ placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -462,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
int r;
/* Can't move a pinned BO */
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
@@ -507,6 +610,15 @@ memcpy:
}
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
return 0;
@@ -551,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
- struct drm_mm_node *mm = bo->mem.mm_node;
- uint64_t size = mm->size;
- uint64_t offset = page_offset;
+ struct drm_mm_node *mm;
+ unsigned long offset = (page_offset << PAGE_SHIFT);
- page_offset = do_div(offset, size);
- mm += offset;
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+ mm = amdgpu_find_mm_node(&bo->mem, &offset);
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
+ (offset >> PAGE_SHIFT);
}
/*
@@ -578,6 +689,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ uint32_t last_set_pages;
struct list_head list;
};
@@ -591,6 +703,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
flags |= FOLL_WRITE;
+ down_read(&current->mm->mmap_sem);
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -598,8 +712,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end)
+ if (!vma || vma->vm_file || vma->vm_end < end) {
+ up_read(&current->mm->mmap_sem);
return -EPERM;
+ }
}
do {
@@ -626,13 +742,47 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
} while (pinned < ttm->num_pages);
+ up_read(&current->mm->mmap_sem);
return 0;
release_pages:
- release_pages(pages, pinned, 0);
+ release_pages(pages, pinned);
+ up_read(&current->mm->mmap_sem);
return r;
}
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i])
+ put_page(ttm->pages[i]);
+
+ ttm->pages[i] = pages ? pages[i] : NULL;
+ }
+}
+
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ struct page *page = ttm->pages[i];
+
+ if (!page)
+ continue;
+
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
+ }
+}
+
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
@@ -670,7 +820,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct sg_page_iter sg_iter;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@@ -683,14 +832,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- set_page_dirty(page);
-
- mark_page_accessed(page);
- put_page(page);
- }
+ amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg);
}
@@ -699,7 +841,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
- int r;
+ uint64_t flags;
+ int r = 0;
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(ttm);
@@ -718,7 +861,25 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- return 0;
+ if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+ return 0;
+
+ spin_lock(&gtt->adev->gtt_list_lock);
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
+ goto error_gart_bind;
+ }
+
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+ spin_unlock(&gtt->adev->gtt_list_lock);
+ return r;
}
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
@@ -730,36 +891,38 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- uint64_t flags;
+ struct ttm_mem_reg tmp;
+ struct ttm_placement placement;
+ struct ttm_place placements;
int r;
if (!ttm || amdgpu_ttm_is_bound(ttm))
return 0;
- r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
- NULL, bo_mem);
- if (r) {
- DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
- return r;
- }
+ tmp = bo->mem;
+ tmp.mm_node = NULL;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+ TTM_PL_FLAG_TT;
- spin_lock(&gtt->adev->gtt_list_lock);
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
- gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+ if (unlikely(r))
+ return r;
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
- goto error_gart_bind;
- }
+ r = ttm_bo_move_ttm(bo, true, false, &tmp);
+ if (unlikely(r))
+ ttm_bo_mem_put(bo, &tmp);
+ else
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bo->bdev->man[bo->mem.mem_type].gpu_offset;
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
- spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
@@ -852,10 +1015,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -878,44 +1039,23 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- adev = amdgpu_ttm_adev(ttm->bdev);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
return ttm_dma_populate(&gtt->ttm, adev->dev);
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
- return 0;
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
+ amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
@@ -933,14 +1073,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -957,6 +1090,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
+ gtt->last_set_pages = 0;
return 0;
}
@@ -1009,6 +1143,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL || !gtt->userptr)
+ return false;
+
+ return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
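The last_set_pages field introduced in this patch is one half of a simple handshake with the MMU notifier: the notifier bumps mmu_invalidations on every range invalidation, amdgpu_ttm_tt_set_user_pages snapshots the counter when a fresh page array is installed, and amdgpu_ttm_tt_userptr_needs_pages reports staleness by comparing the two. A toy C11 model of the scheme (names are illustrative, not the kernel's):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct userptr {
		atomic_uint mmu_invalidations;	/* bumped by MMU notifier */
		uint32_t last_set_pages;	/* snapshot taken at set time */
	};

	static void set_pages(struct userptr *u)
	{
		u->last_set_pages = atomic_load(&u->mmu_invalidations);
	}

	static bool needs_pages(struct userptr *u)
	{
		/* Any invalidation since the snapshot means the page
		 * array is stale and must be re-acquired. */
		return atomic_load(&u->mmu_invalidations) !=
		       u->last_set_pages;
	}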
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1049,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
- return ttm_bo_eviction_valuable(bo, place);
-
switch (bo->mem.mem_type) {
case TTM_PL_TT:
return true;
@@ -1066,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
- break;
+ return false;
default:
break;
@@ -1075,6 +1216,64 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return ttm_bo_eviction_valuable(bo, place);
}
+static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ void *buf, int len, int write)
+{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct drm_mm_node *nodes;
+ uint32_t value = 0;
+ int ret = 0;
+ uint64_t pos;
+ unsigned long flags;
+
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return -EIO;
+
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
+ pos = (nodes->start << PAGE_SHIFT) + offset;
+
+ while (len && pos < adev->mc.mc_vram_size) {
+ uint64_t aligned_pos = pos & ~(uint64_t)3;
+ uint32_t bytes = 4 - (pos & 3);
+ uint32_t shift = (pos & 3) * 8;
+ uint32_t mask = 0xffffffff << shift;
+
+ if (len < bytes) {
+ mask &= 0xffffffff >> (bytes - len) * 8;
+ bytes = len;
+ }
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32_NO_KIQ(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+
+ ret += bytes;
+ buf = (uint8_t *)buf + bytes;
+ pos += bytes;
+ len -= bytes;
+ if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
+ ++nodes;
+ pos = (nodes->start << PAGE_SHIFT);
+ }
+ }
+
+ return ret;
+}
+
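The access_memory callback above reaches VRAM through the indirect MM_INDEX/MM_DATA register pair, which only moves aligned 32-bit words, so sub-word accesses become a read-modify-write with a shifted mask. The masking arithmetic can be verified in isolation; below is a standalone little-endian model in which a plain array stands in for the register pair:

	#include <stdint.h>
	#include <string.h>

	/* Write len bytes from buf at byte offset pos into a space
	 * reachable only one aligned 32-bit word at a time (modelled
	 * here by the vram_words array). */
	static void rmw_write(uint32_t *vram_words, uint64_t pos,
			      const uint8_t *buf, uint32_t len)
	{
		while (len) {
			uint64_t aligned = pos & ~(uint64_t)3;
			uint32_t bytes = 4 - (pos & 3);
			uint32_t shift = (pos & 3) * 8;
			uint32_t mask = 0xffffffffu << shift;
			uint32_t value, in = 0;

			if (len < bytes) {
				mask &= 0xffffffffu >> (bytes - len) * 8;
				bytes = len;
			}

			/* Read-modify-write keeps bytes outside the mask. */
			value = vram_words[aligned / 4] & ~mask;
			memcpy(&in, buf, bytes);
			vram_words[aligned / 4] = value | ((in << shift) & mask);

			buf += bytes;
			pos += bytes;
			len -= bytes;
		}
	}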
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@@ -1090,11 +1289,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
+ .access_memory = &amdgpu_ttm_access_memory
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
+ uint64_t gtt_size;
int r;
+ u64 vis_vram_limit;
r = amdgpu_ttm_global_init(adev);
if (r) {
@@ -1118,36 +1320,46 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+
+ /* Reduce size of CPU-visible VRAM if requested */
+ vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
+ if (amdgpu_vis_vram_limit > 0 &&
+ vis_vram_limit <= adev->mc.visible_vram_size)
+ adev->mc.visible_vram_size = vis_vram_limit;
+
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
- r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->stollen_vga_memory);
+ /*
+ * The VRAM reserved for firmware must be pinned at a specific
+ * place in VRAM, so reserve it early.
+ */
+ r = amdgpu_fw_reserve_vram_init(adev);
if (r) {
return r;
}
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+
+ r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->stolen_vga_memory,
+ NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
- if (r) {
- amdgpu_bo_unref(&adev->stollen_vga_memory);
- return r;
- }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
- adev->mc.gtt_size >> PAGE_SHIFT);
+
+ if (amdgpu_gtt_size == -1)
+ gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
+ else
+ gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
}
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
- (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+ (unsigned)(gtt_size / (1024 * 1024)));
adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
@@ -1203,13 +1415,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
amdgpu_ttm_debugfs_fini(adev);
- if (adev->stollen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
+ if (adev->stolen_vga_memory) {
+ r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
if (r == 0) {
- amdgpu_bo_unpin(adev->stollen_vga_memory);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
+ amdgpu_bo_unpin(adev->stolen_vga_memory);
+ amdgpu_bo_unreserve(adev->stolen_vga_memory);
}
- amdgpu_bo_unref(&adev->stollen_vga_memory);
+ amdgpu_bo_unref(&adev->stolen_vga_memory);
}
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
@@ -1256,12 +1468,77 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem, unsigned num_pages,
+ uint64_t offset, unsigned window,
+ struct amdgpu_ring *ring,
+ uint64_t *addr)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_device *adev = ring->adev;
+ struct ttm_tt *ttm = bo->ttm;
+ struct amdgpu_job *job;
+ unsigned num_dw, num_bytes;
+ dma_addr_t *dma_address;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ uint64_t flags;
+ int r;
+
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+ *addr = adev->mc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+
+ num_dw = adev->mman.buffer_funcs->copy_num_dw;
+ while (num_dw & 0x7)
+ num_dw++;
+
+ num_bytes = num_pages * 8;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = adev->gart.table_addr;
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ &job->ibs[0].ptr[num_dw]);
+ if (r)
+ goto error_free;
+
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ dma_fence_put(fence);
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
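amdgpu_map_buffer above gives not-yet-allocated GTT BOs a temporary GPU address by writing their PTEs into one of two fixed windows of the GART table from within the copy IB itself (window 0 for the source, window 1 for the destination, serialized by gtt_window_lock). Each window's GPU virtual address is a fixed offset from the GART aperture base; a sketch of that computation, with an assumed illustrative page count per window:

	#include <stdint.h>

	#define WINDOW_PAGES	512	/* assumed stand-in for the limit */
	#define GPU_PAGE_SIZE	4096

	static uint64_t window_gpu_addr(uint64_t gart_start, unsigned window)
	{
		return gart_start +
		       (uint64_t)window * WINDOW_PAGES * GPU_PAGE_SIZE;
	}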
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1283,9 +1560,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (r)
return r;
+ job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1327,12 +1606,13 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ uint32_t max_bytes = 8 *
+ adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node;
@@ -1347,6 +1627,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (r)
+ return r;
+ }
+
num_pages = bo->tbo.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
@@ -1357,7 +1643,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
num_pages -= mm_node->size;
++mm_node;
}
- num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* num of dwords for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */
num_dw += 64;
@@ -1368,7 +1656,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1382,16 +1670,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
- r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
- &bo->tbo.mem, &dst_addr);
- if (r)
- return r;
+ WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
+ dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
+ dst_addr, 0,
+ cur_size_in_bytes >> 3, 0,
+ src_data);
dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
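amdgpu_fill_buffer now emits the SDMA PTE-update packet instead of the fill packet, since the former can write a full 64-bit pattern: each 8-byte "PTE" receives src_data verbatim with an increment of 0, so one command covers cur_size_in_bytes >> 3 entries and the total size must be 8-byte aligned (enforced by the WARN_ONCE above). A small sketch of the resulting command-count math, assuming max_bytes per command:

	#include <stdint.h>

	#define PAGE_SHIFT 12

	/* Commands needed to fill nodes of the given page counts when
	 * one command programs at most max_bytes of 8-byte entries. */
	static unsigned fill_num_cmds(const unsigned long *node_pages,
				      unsigned num_nodes, uint32_t max_bytes)
	{
		unsigned cmds = 0, i;

		for (i = 0; i < num_nodes; ++i) {
			uint64_t bytes =
				(uint64_t)node_pages[i] << PAGE_SHIFT;

			cmds += (bytes + max_bytes - 1) / max_bytes;
		}
		return cmds;
	}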
@@ -1417,32 +1705,16 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
- *man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = *(int *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
- switch (ttm_pl) {
- case TTM_PL_VRAM:
- seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- adev->mman.bdev.man[ttm_pl].size,
- (u64)atomic64_read(&adev->vram_usage) >> 20,
- (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
- break;
- case TTM_PL_TT:
- amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
- break;
- }
+ man->func->debug(man, &p);
return 0;
}
@@ -1479,9 +1751,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32(mmMM_DATA);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ value = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf);
@@ -1497,10 +1769,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
+ while (size) {
+ unsigned long flags;
+ uint32_t value;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
static const struct file_operations amdgpu_ttm_vram_fops = {
.owner = THIS_MODULE,
.read = amdgpu_ttm_vram_read,
- .llseek = default_llseek
+ .write = amdgpu_ttm_vram_write,
+ .llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
@@ -1552,6 +1864,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
+static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int r;
+ uint64_t phys;
+ struct iommu_domain *dom;
+
+ /* always return 8 bytes */
+ if (size != 8)
+ return -EINVAL;
+
+ /* only accept page addresses */
+ if (*pos & 0xFFF)
+ return -EINVAL;
+
+ dom = iommu_get_domain_for_dev(adev->dev);
+ if (dom)
+ phys = iommu_iova_to_phys(dom, *pos);
+ else
+ phys = *pos;
+
+ r = copy_to_user(buf, &phys, 8);
+ if (r)
+ return -EFAULT;
+
+ return 8;
+}
+
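The new amdgpu_iova file translates a DMA address (given as the file offset) into a physical address through the device's IOMMU domain, falling back to identity when no IOMMU is active. A hypothetical userspace reader, assuming the usual debugfs mount point and DRI minor 0:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t iova = 0x100000, phys;	/* must be page aligned */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_iova",
			      O_RDONLY);

		if (fd < 0)
			return 1;
		/* the file insists on exactly 8 bytes per read */
		if (pread(fd, &phys, sizeof(phys), iova) == sizeof(phys))
			printf("iova 0x%llx -> phys 0x%llx\n",
			       (unsigned long long)iova,
			       (unsigned long long)phys);
		close(fd);
		return 0;
	}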
+static const struct file_operations amdgpu_ttm_iova_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_iova_to_phys_read,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+ int domain;
+} ttm_debugfs_entries[] = {
+ { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
+#endif
+ { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
+};
+
#endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
@@ -1562,22 +1921,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
- ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_vram_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.mc_vram_size);
- adev->mman.vram = ent;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_gtt_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gtt_size);
- adev->mman.gtt = ent;
+ for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
+ ent = debugfs_create_file(
+ ttm_debugfs_entries[count].name,
+ S_IFREG | S_IRUGO, root,
+ adev,
+ ttm_debugfs_entries[count].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
+ i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+ else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
+ i_size_write(ent->d_inode, adev->mc.gart_size);
+ adev->mman.debugfs_entries[count] = ent;
+ }
-#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@@ -1587,7 +1945,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
-
return 0;
#endif
}
@@ -1595,14 +1952,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
- debugfs_remove(adev->mman.vram);
- adev->mman.vram = NULL;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- debugfs_remove(adev->mman.gtt);
- adev->mman.gtt = NULL;
-#endif
-
+ for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
+ debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}