author		Christian König <christian.koenig@amd.com>	2022-09-28 20:31:38 +0200
committer	Christian König <christian.koenig@amd.com>	2022-11-03 12:45:20 +0100
commit		f7d66fb2ea43a3016e78a700a2ca6c77a74579f9 (patch)
tree		86cde4157e5735e0f325dd239b515cdc7c908e36 /drivers/gpu/drm/amd
parent		940ca22b7ea9db6857ba7c6adb961b84d8cc28ff (diff)
drm/amdgpu: cleanup scheduler job initialization v2
Init the DRM scheduler base class while allocating the job. This makes
the whole handling much cleaner.

v2: fix coding style

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-7-christian.koenig@amd.com
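In short, callers move from a two-step allocate-then-init pattern to a
single call, and job submission can no longer fail. A minimal sketch of
the change in calling convention, assuming a generic entity/owner pair
and a placeholder err_free label (illustrative only, not taken verbatim
from the patch):

	/* Before: allocate the job, then init the scheduler base class
	 * by hand; submission could still fail and required manual
	 * cleanup with amdgpu_job_free().
	 */
	r = amdgpu_job_alloc(adev, num_ibs, &job, vm);
	if (r)
		return r;
	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		goto err_free;		/* err_free: amdgpu_job_free(job) */
	/* ... fill in the IBs ... */
	r = amdgpu_job_submit(job, entity, owner, &fence);
	if (r)
		goto err_free;
	dma_fence_put(fence);

	/* After: entity and owner are bound at allocation time, so
	 * drm_sched_job_init() happens inside amdgpu_job_alloc(), and
	 * amdgpu_job_submit() cannot fail; it just returns the finished
	 * fence, which the caller owns a reference to.
	 */
	r = amdgpu_job_alloc(adev, vm, entity, owner, num_ibs, &job);
	if (r)
		return r;
	/* ... fill in the IBs ... */
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);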
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c	2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c		8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_job.c		44
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_job.h		14
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c	7
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c		56
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c		9
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c		13
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c		22
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c	61
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c		12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c		8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c		12
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_migrate.c	17
14 files changed, 135 insertions, 150 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 03bbfaa51cbc..cf58c1125e60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -674,7 +674,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1bbd39b3b0fc..aa6f6c428dbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -291,12 +291,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
return -EINVAL;
for (i = 0; i < p->gang_size; ++i) {
- ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
- if (ret)
- goto free_all_kdata;
-
- ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
- &fpriv->vm);
+ ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+ num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 384149a81978..fa60d25dfbd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,8 +89,9 @@ exit:
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job)
{
if (num_ibs == 0)
return -EINVAL;
@@ -111,23 +112,30 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
- return 0;
+ if (!entity)
+ return 0;
+
+ return drm_sched_job_init(&(*job)->base, entity, owner);
}
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
{
int r;
- r = amdgpu_job_alloc(adev, 1, job, NULL);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
if (r)
return r;
(*job)->num_ibs = 1;
r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
- if (r)
+ if (r) {
+ if (entity)
+ drm_sched_job_cleanup(&(*job)->base);
kfree(*job);
+ }
return r;
}
@@ -191,6 +199,9 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
void amdgpu_job_free(struct amdgpu_job *job)
{
+ if (job->base.entity)
+ drm_sched_job_cleanup(&job->base);
+
amdgpu_job_free_resources(job);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -203,25 +214,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
dma_fence_put(&job->hw_fence);
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
- void *owner, struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
- int r;
-
- if (!f)
- return -EINVAL;
-
- r = drm_sched_job_init(&job->base, entity, owner);
- if (r)
- return r;
+ struct dma_fence *f;
drm_sched_job_arm(&job->base);
-
- *f = dma_fence_get(&job->base.s_fence->finished);
+ f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
drm_sched_entity_push_job(&job->base);
- return 0;
+ return f;
}
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ab7b150e5d50..e2ecab977d0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -78,18 +78,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
return to_amdgpu_ring(job->base.entity->rq->sched);
}
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
struct amdgpu_job *leader);
void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
- void *owner, struct dma_fence **f);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 518eb0e40d32..de182bfcf85f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
const unsigned ib_size_dw = 16;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
ib = &job->ibs[0];
- ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+ ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
+ PACKETJ_TYPE0);
ib->ptr[1] = 0xDEADBEEF;
for (i = 2; i < 16; i += 2) {
ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5aae63891858..9f5c1d86d2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -189,7 +189,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct amdgpu_device *adev = ring->adev;
unsigned offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr;
- struct dma_fence *fence;
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
@@ -229,7 +228,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
@@ -269,18 +270,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
}
}
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
- dma_fence_put(fence);
-
- return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
+ dma_fence_put(amdgpu_job_submit(job));
+ return 0;
}
/**
@@ -1414,7 +1405,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
}
static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
- unsigned long offset, void *buf, int len, int write)
+ unsigned long offset, void *buf,
+ int len, int write)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -1438,26 +1430,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
memcpy(adev->mman.sdma_access_ptr, buf, len);
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
goto out;
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
- src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+ src_mm.start;
dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
if (write)
swap(src_addr, dst_addr);
- amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+ PAGE_SIZE, false);
amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r) {
- amdgpu_job_free(job);
- goto out;
- }
+ fence = amdgpu_job_submit(job);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
@@ -1956,7 +1949,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
AMDGPU_IB_POOL_DELAYED;
int r;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, pool, job);
if (r)
return r;
@@ -2015,8 +2010,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
if (direct_submit)
r = amdgpu_job_submit_direct(job, ring, fence);
else
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ *fence = amdgpu_job_submit(job);
if (r)
goto error_free;
@@ -2061,16 +2055,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
-
+ *fence = amdgpu_job_submit(job);
return 0;
-
-error_free:
- amdgpu_job_free(job);
- return r;
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6eac649499d3..8baddf79635b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
- r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ 64, direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
@@ -1181,10 +1183,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
goto err_free;
- r = amdgpu_job_submit(job, &adev->uvd.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err_free;
+ f = amdgpu_job_submit(job);
}
amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 02cb3a12dd76..b239e874f2d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
return r;
@@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence *f = NULL;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct)
r = amdgpu_job_submit_direct(job, ring, &f);
else
- r = amdgpu_job_submit(job, &ring->adev->vce.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ f = amdgpu_job_submit(job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 0b52af415b28..3449145ab2bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
+ u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
int i, r;
- r = amdgpu_job_alloc_with_ib(adev, 64,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ 64, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
goto err;
@@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (sq)
ib_size_dw += 8;
- r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
goto err;
@@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
if (sq)
ib_size_dw += 8;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
return r;
@@ -982,8 +985,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (sq)
ib_size_dw += 8;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 2b0669c464f6..6768af365f3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
return r;
}
+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+ unsigned int count)
+{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
+ struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+ : &p->vm->delayed;
+ unsigned int ndw;
+ int r;
+
+ /* estimate how many dw we need */
+ ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+ ndw * 4, pool, &p->job);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ return 0;
+}
+
/**
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
*
@@ -61,17 +87,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
- enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
- : AMDGPU_IB_POOL_DELAYED;
- unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+ r = amdgpu_vm_sdma_alloc_job(p, 0);
if (r)
return r;
- p->num_dw_left = ndw;
-
if (!resv)
return 0;
@@ -91,20 +112,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
struct amdgpu_ib *ib = p->job->ibs;
- struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
struct dma_fence *f;
- int r;
- entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
- ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+ ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+ sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
- if (r)
- goto error;
+ f = amdgpu_job_submit(p->job);
if (p->unlocked) {
struct dma_fence *tmp = dma_fence_get(f);
@@ -120,10 +137,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
swap(*fence, f);
dma_fence_put(f);
return 0;
-
-error:
- amdgpu_job_free(p->job);
- return r;
}
/**
@@ -203,8 +216,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t flags)
{
struct amdgpu_bo *bo = &vmbo->bo;
- enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
- : AMDGPU_IB_POOL_DELAYED;
struct dma_resv_iter cursor;
unsigned int i, ndw, nptes;
struct dma_fence *fence;
@@ -231,19 +242,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- /* estimate how many dw we need */
- ndw = 32;
- if (p->pages_addr)
- ndw += count * 2;
- ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
- ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
- &p->job);
+ r = amdgpu_vm_sdma_alloc_job(p, count);
if (r)
return r;
-
- p->num_dw_left = ndw;
}
if (!p->pages_addr) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f513e2c2e964..657e53708248 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
&job);
if (r)
goto error_alloc;
@@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
job->vm_needs_flush = true;
job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_submit;
+ fence = amdgpu_job_submit(job);
mutex_unlock(&adev->mman.gtt_window_lock);
@@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
-error_submit:
- amdgpu_job_free(job);
-
error_alloc:
mutex_unlock(&adev->mman.gtt_window_lock);
DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 375c440957dc..5fe872f4bea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -216,8 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -280,8 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e668b3baa8c6..e407be6cb63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
*
* Open up a stream for HW test
*/
-static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
@@ -224,8 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -276,7 +276,7 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
@@ -287,8 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 2797029bd500..79069c485386 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -65,8 +65,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = npages * 8;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
@@ -89,18 +92,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
cpu_addr = &job->ibs[0].ptr[num_dw];
amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
+ fence = amdgpu_job_submit(job);
dma_fence_put(fence);
return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
}
/**