path: root/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_SVM_H_
#define KFD_SVM_H_

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"

/* Marks a range/page as resident in VRAM rather than in system memory */
#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)

/* GPUs in the same XGMI hive share one pgmap owner, so device-private pages
 * from any GPU in the hive are accessible to the others.
 */
#define SVM_ADEV_PGMAP_OWNER(adev)\
			((adev)->hive ? (void *)(adev)->hive : (void *)(adev))

struct svm_range_bo {
	struct amdgpu_bo		*bo;
	struct kref			kref;
	struct list_head		range_list; /* all svm ranges sharing this bo */
	spinlock_t			list_lock;
	struct amdgpu_amdkfd_fence	*eviction_fence;
	struct work_struct		eviction_work;
	uint32_t			evicting;
	struct work_struct		release_work;
	struct kfd_node			*node;
};

/* Deferred operations applied to a range from the deferred work list */
enum svm_work_list_ops {
	SVM_OP_NULL,
	SVM_OP_UNMAP_RANGE,
	SVM_OP_UPDATE_RANGE_NOTIFIER,
	SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
	SVM_OP_ADD_RANGE,
	SVM_OP_ADD_RANGE_AND_MAP
};

/* Deferred work pending on a range, queued with svm_range_add_list_work() */
struct svm_work_list_item {
	enum svm_work_list_ops op;
	struct mm_struct *mm;
};

/**
 * struct svm_range - shared virtual memory range
 *
 * @svms:       list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: to serialize range migration, validation and mapping update
 * @start:      range start address in pages
 * @last:       range last address in pages
 * @it_node:    node [start, last] stored in interval tree; start and last are
 *              page numbers, so the range covers (last - start + 1) pages
 * @list:       linked list node, used to scan all ranges of svms
 * @update_list: linked list node used to add to update_list
 * @npages:     number of pages
 * @vram_pages: number of vram pages in this svm_range
 * @dma_addr:   dma mapping addresses on each GPU for system memory physical pages
 * @ttm_res:    vram ttm resource map
 * @offset:     range start offset within mm_nodes
 * @svm_bo:     struct to manage the split amdgpu_bo
 * @svm_bo_list: linked list node, to scan all ranges which share the same svm_bo
 * @lock:       protect prange start, last, child_list, svm_bo_list
 * @saved_flags: save/restore current PF_MEMALLOC flags
 * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: this svm_range location. 0: all pages are from sys ram;
 *              GPU id: this svm_range may include vram pages from GPU with
 *              id actual_loc.
 * @granularity: migration granularity, log2 num pages
 * @invalid:    non-zero means the CPU page table is invalidated
 * @validate_timestamp: system timestamp when range is validated
 * @notifier:   registered mmu interval notifier
 * @work_item:  deferred work item information
 * @deferred_list: list header used to add range to deferred list
 * @child_list: list header for split ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
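 * @mapped_to_gpu: flag to indicate if the range is currently mapped to GPUs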
 *
 * Data structure for a virtual memory range shared by CPU and GPUs. It can be
 * backed by system memory (ram) or device memory (vram), and it can migrate
 * between ram and vram in either direction.
 */
struct svm_range {
	struct svm_range_list		*svms;
	struct mutex			migrate_mutex;
	unsigned long			start;
	unsigned long			last;
	struct interval_tree_node	it_node;
	struct list_head		list;
	struct list_head		update_list;
	uint64_t			npages;
	uint64_t			vram_pages;
	dma_addr_t			*dma_addr[MAX_GPU_INSTANCE];
	struct ttm_resource		*ttm_res;
	uint64_t			offset;
	struct svm_range_bo		*svm_bo;
	struct list_head		svm_bo_list;
	struct mutex                    lock;
	unsigned int                    saved_flags;
	uint32_t			flags;
	uint32_t			preferred_loc;
	uint32_t			prefetch_loc;
	uint32_t			actual_loc;
	uint8_t				granularity;
	atomic_t			invalid;
	ktime_t				validate_timestamp;
	struct mmu_interval_notifier	notifier;
	struct svm_work_list_item	work_item;
	struct list_head		deferred_list;
	struct list_head		child_list;
	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
	bool				mapped_to_gpu;
};
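
/*
 * Illustrative sketch (assumption, not driver code): start and last hold page
 * numbers, so a CPU virtual address addr falls inside a range when
 *
 *	(addr >> PAGE_SHIFT) >= prange->start &&
 *	(addr >> PAGE_SHIFT) <= prange->last
 *
 * and the range then covers (prange->last - prange->start + 1) pages.
 */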

static inline void svm_range_lock(struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
	memalloc_noreclaim_restore(prange->saved_flags);
	mutex_unlock(&prange->lock);
}
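
/*
 * Illustrative sketch, not taken from the driver: prange->lock protects the
 * range's start, last, child_list and svm_bo_list, and the helpers above wrap
 * it in memalloc_noreclaim_save()/restore() (PF_MEMALLOC), so allocations made
 * while the lock is held do not enter direct reclaim.
 *
 *	svm_range_lock(prange);
 *	// read or update prange->start, prange->last, prange->svm_bo_list
 *	svm_range_unlock(prange);
 */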

static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_get(&svm_bo->kref);

	return svm_bo;
}
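
/*
 * Illustrative sketch, not taken from the driver: take a reference on a
 * range's backing svm_bo before using it outside prange->lock, then drop it
 * with svm_range_bo_unref_async() declared below.
 *
 *	struct svm_range_bo *svm_bo;
 *
 *	svm_range_lock(prange);
 *	svm_bo = svm_range_bo_ref(prange->svm_bo);
 *	svm_range_unlock(prange);
 *
 *	if (svm_bo) {
 *		// use svm_bo->bo
 *		svm_range_bo_unref_async(svm_bo);
 *	}
 */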

int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	      uint64_t size, uint32_t nattrs,
	      struct kfd_ioctl_svm_attribute *attrs);
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
				      unsigned long addr,
				      struct svm_range **parent);
struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
					  uint32_t gpu_id);
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
			    bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
			    uint32_t vmid, uint32_t node_id, uint64_t addr,
			    bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
			     struct svm_range *prange, struct mm_struct *mm,
			     enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_offset);
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);

/* The SVM API and HMM page migration work together: the device memory type
 * is initialized to a non-zero value when page migration registers device
 * memory.
 */
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
					(adev)->gmc.is_app_apu)
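
/*
 * Illustrative usage (assumption, not taken from the driver): SVM-only code
 * paths can be gated on this check, e.g.
 *
 *	if (!KFD_IS_SVM_API_SUPPORTED(adev))
 *		return -EOPNOTSUPP;
 */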

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

void svm_range_set_max_pages(struct amdgpu_device *adev);
int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);

#else

struct kfd_process;

static inline int svm_range_list_init(struct kfd_process *p)
{
	return 0;
}
static inline void svm_range_list_fini(struct kfd_process *p)
{
	/* empty */
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
					  unsigned int pasid,
					  uint32_t client_id, uint32_t node_id,
					  uint64_t addr, bool write_fault)
{
	return -EFAULT;
}

static inline int svm_range_schedule_evict_svm_bo(
		struct amdgpu_amdkfd_fence *fence)
{
	WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
	return -EINVAL;
}

static inline int svm_range_get_info(struct kfd_process *p,
				     uint32_t *num_svm_ranges,
				     uint64_t *svm_priv_data_size)
{
	*num_svm_ranges = 0;
	*svm_priv_data_size = 0;
	return 0;
}

static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
					  uint8_t __user *user_priv_data,
					  uint64_t *priv_offset)
{
	return 0;
}

static inline int kfd_criu_restore_svm(struct kfd_process *p,
				       uint8_t __user *user_priv_ptr,
				       uint64_t *priv_data_offset,
				       uint64_t max_priv_data_size)
{
	return -EINVAL;
}

static inline int kfd_criu_resume_svm(struct kfd_process *p)
{
	return 0;
}

static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
{
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */