summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/amd/amdgpu
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c517
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c271
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c426
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c237
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c253
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c762
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c222
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c442
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c163
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c806
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c158
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c946
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smum.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h)4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c190
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c243
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c295
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c403
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c495
87 files changed, 6784 insertions, 2980 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7335c0420c70..f3cb69de0c44 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -25,3 +25,5 @@ config DRM_AMDGPU_GART_DEBUGFS
Selecting this option creates a debugfs file to inspect the mapped
pages. Uses more memory for housekeeping, enable only for debugging.
+source "drivers/gpu/drm/amd/powerplay/Kconfig"
+source "drivers/gpu/drm/amd/acp/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7fcdcedaadb..21dd7c00da15 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -58,7 +58,8 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o
+ dce_v11_0.o \
+ dce_virtual.o
# add GFX block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..b8d66670bb17 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,6 +90,7 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
+#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -119,6 +120,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -147,6 +149,7 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -408,6 +411,10 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e055d5be1c3c..3cc2629eb158 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,6 +51,7 @@
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"
@@ -85,8 +86,14 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -183,6 +190,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
struct amdgpu_ip_block_version {
enum amd_ip_block_type type;
@@ -240,10 +251,9 @@ struct amdgpu_vm_pte_funcs {
uint64_t pe, uint64_t src,
unsigned count);
/* write pte one entry at a time with addr mapping */
- void (*write_pte)(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr);
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -298,13 +308,16 @@ struct amdgpu_ring_funcs {
uint32_t oa_base, uint32_t oa_size);
/* testing functions */
int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
};
/*
@@ -385,46 +398,9 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
- * TTM.
+ * BO.
*/
-#define AMDGPU_TTM_LRU_SIZE 20
-
-struct amdgpu_mman_lru {
- struct list_head *lru[TTM_NUM_MEM_TYPES];
- struct list_head *swap_lru;
-};
-
-struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_device bdev;
- bool mem_global_referenced;
- bool initialized;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
-#endif
-
- /* buffer handling */
- const struct amdgpu_buffer_funcs *buffer_funcs;
- struct amdgpu_ring *buffer_funcs_ring;
- /* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
-
- /* custom LRU management */
- struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
- struct reservation_object *resv,
- struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
struct amdgpu_bo_list_entry {
struct amdgpu_bo *robj;
struct ttm_validate_buffer tv;
@@ -487,10 +463,12 @@ struct amdgpu_bo {
struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
+ struct amdgpu_bo *shadow;
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;
struct list_head mn_list;
+ struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
@@ -594,11 +572,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
@@ -668,6 +644,8 @@ struct amdgpu_mc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint32_t srbm_soft_reset;
+ struct amdgpu_mode_mc_save save;
};
/*
@@ -712,10 +690,11 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
*/
struct amdgpu_flip_work {
- struct work_struct flip_work;
+ struct delayed_work flip_work;
struct work_struct unpin_work;
struct amdgpu_device *adev;
int crtc_id;
+ u32 target_vblank;
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_rbo;
@@ -754,12 +733,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
+void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
@@ -767,12 +745,9 @@ struct amdgpu_ring {
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched;
- spinlock_t fence_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
- u64 next_rptr_gpu_addr;
- volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -791,14 +766,16 @@ struct amdgpu_ring {
u32 doorbell_index;
bool use_doorbell;
unsigned wptr_offs;
- unsigned next_rptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
enum amdgpu_ring_type type;
char name[16];
unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
};
/*
@@ -808,13 +785,17 @@ struct amdgpu_ring {
/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
@@ -826,10 +807,7 @@ struct amdgpu_ring {
#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB (0 << 7)
-#define AMDGPU_PTE_FRAG_64KB (4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
/* How to programm VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
@@ -839,6 +817,7 @@ struct amdgpu_ring {
struct amdgpu_vm_pt {
struct amdgpu_bo_list_entry entry;
uint64_t addr;
+ uint64_t shadow_addr;
};
struct amdgpu_vm {
@@ -861,6 +840,7 @@ struct amdgpu_vm {
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
+ uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
@@ -883,13 +863,14 @@ struct amdgpu_vm_id {
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
- struct amdgpu_ring *last_user;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
+ uint32_t current_gpu_reset_count;
+
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
@@ -905,6 +886,10 @@ struct amdgpu_vm_manager {
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
uint32_t max_pfn;
/* vram base address for page table entry */
u64 vram_base_offset;
@@ -926,19 +911,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -947,7 +928,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem);
+ bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -1142,6 +1123,12 @@ struct amdgpu_cu_info {
uint32_t bitmap[4][4];
};
+struct amdgpu_gfx_funcs {
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+ void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config;
@@ -1178,6 +1165,11 @@ struct amdgpu_gfx {
/* ce ram size*/
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
+ const struct amdgpu_gfx_funcs *funcs;
+
+ /* reset mask */
+ uint32_t grbm_soft_reset;
+ uint32_t srbm_soft_reset;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1195,10 +1187,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1250,6 +1238,7 @@ struct amdgpu_job {
uint32_t num_ibs;
void *owner;
uint64_t ctx;
+ bool vm_needs_flush;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
@@ -1257,8 +1246,7 @@ struct amdgpu_job {
uint32_t oa_base, oa_size;
/* user fence handling */
- struct amdgpu_bo *uf_bo;
- uint32_t uf_offset;
+ uint64_t uf_addr;
uint64_t uf_sequence;
};
@@ -1560,6 +1548,12 @@ struct amdgpu_dpm_funcs {
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
};
struct amdgpu_dpm {
@@ -1662,7 +1656,9 @@ struct amdgpu_uvd {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
bool address_64_bit;
+ bool use_ctx_buf;
struct amd_sched_entity entity;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1683,11 +1679,13 @@ struct amdgpu_vce {
struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
struct delayed_work idle_work;
+ struct mutex idle_mutex;
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
struct amd_sched_entity entity;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1708,6 +1706,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1767,6 +1766,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
/*
* amdgpu smumgr functions
*/
@@ -1811,12 +1812,8 @@ struct amdgpu_asic_funcs {
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
int (*reset)(struct amdgpu_device *adev);
- /* wait for mc_idle */
- int (*wait_for_mc_idle)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
- /* get the gpu clock counter */
- uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@@ -1937,6 +1934,7 @@ struct amdgpu_ip_block_status {
bool valid;
bool sw;
bool hw;
+ bool hang;
};
struct amdgpu_device {
@@ -2003,6 +2001,10 @@ struct amdgpu_device {
spinlock_t didt_idx_lock;
amdgpu_rreg_t didt_rreg;
amdgpu_wreg_t didt_wreg;
+ /* protects concurrent gc_cac register access */
+ spinlock_t gc_cac_idx_lock;
+ amdgpu_rreg_t gc_cac_rreg;
+ amdgpu_wreg_t gc_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
@@ -2028,9 +2030,11 @@ struct amdgpu_device {
atomic64_t vram_vis_usage;
atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
+ atomic64_t num_evictions;
atomic_t gpu_reset_counter;
/* display */
+ bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
@@ -2038,7 +2042,7 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
- unsigned fence_context;
+ u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
@@ -2093,6 +2097,10 @@ struct amdgpu_device {
struct kfd_dev *kfd;
struct amdgpu_virtualization virtualization;
+
+ /* link all shadow bo */
+ struct list_head shadow_list;
+ struct mutex shadow_list_lock;
};
bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2131,6 +2139,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
@@ -2166,6 +2176,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define REG_GET_FIELD(value, reg, field) \
(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, field, val) \
+ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
/*
* BIOS helpers.
*/
@@ -2206,23 +2219,21 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
*/
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
+#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2264,6 +2275,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
@@ -2342,6 +2355,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
@@ -2349,6 +2374,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2374,6 +2400,8 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -2383,9 +2411,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
#endif
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 252edba16e36..892d60fb225b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -421,29 +421,6 @@ static int acp_suspend(void *handle)
static int acp_resume(void *handle)
{
- int i, ret;
- struct acp_pm_domain *apd;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* return early if no ACP */
- if (!adev->acp.acp_genpd)
- return 0;
-
- /* SMU block will power on ACP irrespective of ACP runtime status.
- * Power off explicitly based on genpd ACP runtime status so that ACP
- * hw and ACP-genpd status are in sync.
- * 'suspend_power_off' represents "Power status before system suspend"
- */
- if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
- apd = container_of(&adev->acp.acp_genpd->gpd,
- struct acp_pm_domain, gpd);
-
- for (i = 4; i >= 0 ; i--) {
- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
- if (ret)
- pr_err("ACP tile %d tile suspend failed\n", i);
- }
- }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 32809f749903..d080d0807a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
- if (rdev->asic_funcs->get_gpu_clock_counter)
- return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+ if (rdev->gfx.funcs->get_gpu_clock_counter)
+ return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9df1bcb35bf0..1b621160b52e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -259,6 +259,33 @@ static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown
};
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 size, data_offset;
+ u8 frev, crev;
+ ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+ ATOM_OBJECT_HEADER *obj_header;
+
+ if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ return false;
+
+ if (crev < 2)
+ return false;
+
+ obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+ path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+ if (path_obj->ucNumOfDispPath)
+ return true;
+ else
+ return false;
+}
+
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
@@ -551,28 +578,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le16_to_cpu(firmware_info->info.usReferenceClock);
ppll->reference_div = 0;
- if (crev < 2)
- ppll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
- else
- ppll->pll_out_min =
- le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ ppll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
ppll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
- ppll->lcd_pll_out_min =
- le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_min == 0)
- ppll->lcd_pll_out_min = ppll->pll_out_min;
- ppll->lcd_pll_out_max =
- le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_max == 0)
- ppll->lcd_pll_out_max = ppll->pll_out_max;
- } else {
+ ppll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_min == 0)
ppll->lcd_pll_out_min = ppll->pll_out_min;
+ ppll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_max == 0)
ppll->lcd_pll_out_max = ppll->pll_out_max;
- }
if (ppll->pll_out_min == 0)
ppll->pll_out_min = 64800;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8c2e69661799..15dd43ec38bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 35a1248aaa77..49de92600074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "amd_acpi.h"
@@ -27,6 +28,7 @@ struct amdgpu_atpx_functions {
struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
+ bool is_hybrid;
};
static struct amdgpu_atpx_priv {
@@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+ return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
@@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+
+ /* some bioses set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+#if 1
+ /* This is a temporary hack until the D3 cold support
+ * makes it upstream. The ATPX power_control method seems
+ * to still work on even if the system should be using
+ * the new standardized hybrid D3 cold ACPI interface.
+ */
+ atpx->functions.power_cntl = true;
+#else
+ atpx->functions.power_cntl = false;
+#endif
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
@@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
@@ -507,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.switchto = amdgpu_atpx_switchto,
.power_state = amdgpu_atpx_power_state,
- .init = amdgpu_atpx_init,
.get_client_id = amdgpu_atpx_get_client_id,
};
@@ -542,6 +573,7 @@ static bool amdgpu_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
+ amdgpu_atpx_init();
return true;
}
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a43ae32..345305235349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+ r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+ false);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 99ca75baa47d..2b6afe123f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
uint16_t tmp, bios_header_start;
r = amdgpu_atrm_get_bios(adev);
- if (r == false)
+ if (!r)
r = amdgpu_acpi_vfct_bios(adev);
- if (r == false)
+ if (!r)
r = igp_read_bios_from_vram(adev);
- if (r == false)
+ if (!r)
r = amdgpu_read_bios(adev);
- if (r == false) {
+ if (!r) {
r = amdgpu_read_bios_from_rom(adev);
}
- if (r == false) {
+ if (!r) {
r = amdgpu_read_disabled_bios(adev);
}
- if (r == false) {
+ if (!r) {
r = amdgpu_read_platform_bios(adev);
}
- if (r == false || adev->bios == NULL) {
+ if (!r || adev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
adev->bios = NULL;
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 823bf5e0b0c8..651115dcce12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
int r;
+ unsigned long total_size = 0;
array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
+ total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
list->array = array;
list->num_entries = num_entries;
+ trace_amdgpu_cs_bo_status(list->num_entries, total_size);
return 0;
error_free:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index cf6f49fc1c75..bc0440f7a31d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
return RREG32_UVD_CTX(index);
case CGS_IND_REG__DIDT:
return RREG32_DIDT(index);
+ case CGS_IND_REG_GC_CAC:
+ return RREG32_GC_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
return WREG32_UVD_CTX(index, value);
case CGS_IND_REG__DIDT:
return WREG32_DIDT(index, value);
+ case CGS_IND_REG_GC_CAC:
+ return WREG32_GC_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
@@ -748,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ break;
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
@@ -787,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -795,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->version = adev->pm.fw_version;
info->image_size = ucode_size;
+ info->ucode_start_address = ucode_start_address;
info->kptr = (void *)src;
}
return 0;
}
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
- struct cgs_system_info *sys_info)
+ struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
@@ -821,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
+ case CGS_SYSTEM_INFO_PCIE_DEV:
+ sys_info->value = adev->pdev->device;
+ break;
+ case CGS_SYSTEM_INFO_PCIE_REV:
+ sys_info->value = adev->pdev->revision;
+ break;
case CGS_SYSTEM_INFO_CG_FLAGS:
sys_info->value = adev->cg_flags;
break;
@@ -830,6 +845,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_GFX_CU_INFO:
sys_info->value = adev->gfx.cu_info.number;
break;
+ case CGS_SYSTEM_INFO_GFX_SE_INFO:
+ sys_info->value = adev->gfx.config.max_shader_engines;
+ break;
default:
return -ENODEV;
}
@@ -903,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
acpi_handle handle;
struct acpi_object_list input;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *params = NULL;
- union acpi_object *obj = NULL;
+ union acpi_object *params, *obj;
uint8_t name[5] = {'\0'};
- struct cgs_acpi_method_argument *argument = NULL;
+ struct cgs_acpi_method_argument *argument;
uint32_t i, count;
acpi_status status;
- int result = 0;
- uint32_t func_no = 0xFFFFFFFF;
+ int result;
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
@@ -927,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (info->pinput_argument == NULL)
return -EINVAL;
argument = info->pinput_argument;
- func_no = argument->value;
for (i = 0; i < info->input_count; i++) {
if (((argument->type == ACPI_TYPE_STRING) ||
(argument->type == ACPI_TYPE_BUFFER)) &&
@@ -972,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
params->integer.value = argument->value;
break;
case ACPI_TYPE_STRING:
- params->string.length = argument->method_length;
+ params->string.length = argument->data_length;
params->string.pointer = argument->pointer;
break;
case ACPI_TYPE_BUFFER:
- params->buffer.length = argument->method_length;
+ params->buffer.length = argument->data_length;
params->buffer.pointer = argument->pointer;
break;
default:
@@ -996,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (ACPI_FAILURE(status)) {
result = -EIO;
- goto error;
+ goto free_input;
}
/* return the output info */
@@ -1006,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if ((obj->type != ACPI_TYPE_PACKAGE) ||
(obj->package.count != count)) {
result = -EIO;
- goto error;
+ goto free_obj;
}
params = obj->package.elements;
} else
@@ -1014,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (params == NULL) {
result = -EIO;
- goto error;
+ goto free_obj;
}
for (i = 0; i < count; i++) {
if (argument->type != params->type) {
result = -EIO;
- goto error;
+ goto free_obj;
}
switch (params->type) {
case ACPI_TYPE_INTEGER:
@@ -1030,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if ((params->string.length != argument->data_length) ||
(params->string.pointer == NULL)) {
result = -EIO;
- goto error;
+ goto free_obj;
}
strncpy(argument->pointer,
params->string.pointer,
@@ -1039,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
case ACPI_TYPE_BUFFER:
if (params->buffer.pointer == NULL) {
result = -EIO;
- goto error;
+ goto free_obj;
}
memcpy(argument->pointer,
params->buffer.pointer,
@@ -1052,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
params++;
}
-error:
- if (obj != NULL)
- kfree(obj);
+ result = 0;
+free_obj:
+ kfree(obj);
+free_input:
kfree((void *)input.pointer);
return result;
}
@@ -1066,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
}
#endif
-int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
+static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@@ -1079,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
struct cgs_acpi_method_info info = {0};
acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
- acpi_input[0].method_length = sizeof(uint32_t);
acpi_input[0].data_length = sizeof(uint32_t);
acpi_input[0].value = acpi_function;
acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
- acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_input[1].data_length = input_size;
acpi_input[1].pointer = pinput;
acpi_output.type = CGS_ACPI_TYPE_BUFFER;
- acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_output.data_length = output_size;
acpi_output.pointer = poutput;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cb07da41152b..319a5e1d9389 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1504,6 +1504,86 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
+
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+ if (encoder) {
+ amdgpu_connector_add_common_modes(encoder, connector);
+ }
+
+ return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
+}
+
+static enum drm_connector_status
+
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+int amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+ .get_modes = amdgpu_connector_virtual_get_modes,
+ .mode_valid = amdgpu_connector_virtual_mode_valid,
+ .best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+ .dpms = amdgpu_connector_virtual_dpms,
+ .detect = amdgpu_connector_virtual_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = amdgpu_connector_virtual_set_property,
+ .destroy = amdgpu_connector_destroy,
+ .force = amdgpu_connector_virtual_force,
+};
+
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1690,7 +1770,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -1889,12 +1968,25 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+ if (!amdgpu_dig_connector)
+ goto failed;
+ amdgpu_connector->con_priv = amdgpu_dig_connector;
+ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
}
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9bc8f1d99733..d80e5d3a4add 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
- if (p->uf_entry.robj) {
- p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
- p->job->uf_offset = uf_offset;
- }
-
+ if (p->uf_entry.robj)
+ p->job->uf_addr = uf_offset;
kfree(chunk_array);
return 0;
@@ -290,18 +287,56 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
return max(bytes_moved_threshold, 1024*1024ull);
}
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo *bo)
+{
+ u64 initial_bytes_moved;
+ uint32_t domain;
+ int r;
+
+ if (bo->pin_count)
+ return 0;
+
+ /* Avoid moving this one if we have moved too many buffers
+ * for this IB already.
+ *
+ * Note that this allows moving at least one buffer of
+ * any size, because it doesn't take the current "bo"
+ * into account. We don't want to disallow buffer moves
+ * completely.
+ */
+ if (p->bytes_moved <= p->bytes_moved_threshold)
+ domain = bo->prefered_domains;
+ else
+ domain = bo->allowed_domains;
+
+retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+ initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ initial_bytes_moved;
+
+ if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
+ }
+ }
+
+ return r;
+}
+
int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
struct amdgpu_bo_list_entry *lobj;
- u64 initial_bytes_moved;
int r;
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = lobj->robj;
bool binding_userptr = false;
struct mm_struct *usermm;
- uint32_t domain;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm && usermm != current->mm)
@@ -316,35 +351,13 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
binding_userptr = true;
}
- if (bo->pin_count)
- continue;
-
- /* Avoid moving this one if we have moved too many buffers
- * for this IB already.
- *
- * Note that this allows moving at least one buffer of
- * any size, because it doesn't take the current "bo"
- * into account. We don't want to disallow buffer moves
- * completely.
- */
- if (p->bytes_moved <= p->bytes_moved_threshold)
- domain = bo->prefered_domains;
- else
- domain = bo->allowed_domains;
-
- retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
- initial_bytes_moved;
-
- if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
+ r = amdgpu_cs_bo_validate(p, bo);
+ if (r)
return r;
+ if (bo->shadow) {
+ r = amdgpu_cs_bo_validate(p, bo);
+ if (r)
+ return r;
}
if (binding_userptr) {
@@ -389,8 +402,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto error_free_pages;
+ }
/* Without a BO list we don't have userptr BOs */
if (!p->bo_list)
@@ -430,9 +445,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
/* Unreserve everything again. */
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- /* We tried to often, just abort */
+ /* We tried too many times, just abort */
if (!--tries) {
r = -EDEADLK;
+ DRM_ERROR("deadlock in %s\n", __func__);
goto error_free_pages;
}
@@ -444,11 +460,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
sizeof(struct page*));
if (!e->user_pages) {
r = -ENOMEM;
+ DRM_ERROR("calloc failure in %s\n", __func__);
goto error_free_pages;
}
r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
+ DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
drm_free_large(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
@@ -459,18 +477,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
r = amdgpu_cs_list_validate(p, &duplicates);
- if (r)
+ if (r) {
+ DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
goto error_validate;
+ }
r = amdgpu_cs_list_validate(p, &p->validated);
- if (r)
+ if (r) {
+ DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
goto error_validate;
+ }
+
+ fpriv->vm.last_eviction_counter =
+ atomic64_read(&p->adev->num_evictions);
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
@@ -499,6 +524,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
}
+ if (p->uf_entry.robj)
+ p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
error_validate:
if (r) {
amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -614,7 +642,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -653,18 +681,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
+ p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- }
+ } else {
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
- r = amdgpu_bo_vm_update_pte(p, vm);
- if (!r)
- amdgpu_cs_sync_rings(p);
+ r = amdgpu_bo_vm_update_pte(p, vm);
+ if (r)
+ return r;
+ }
- return r;
+ return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +792,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
/* UVD & VCE fw doesn't support user fences */
- if (parser->job->uf_bo && (
+ if (parser->job->uf_addr && (
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
@@ -830,17 +861,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
- struct fence *fence;
struct amdgpu_job *job;
int r;
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func,
- p->filp, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
return r;
@@ -848,9 +875,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->ctx = entity->fence_context;
- p->fence = fence_get(fence);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+ p->fence = fence_get(&job->base.s_fence->finished);
+ cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
+ amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e920086af46..c38dc47cd767 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -35,6 +36,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
@@ -44,6 +46,7 @@
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -79,24 +82,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
bool always_indirect)
{
+ uint32_t ret;
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
else {
unsigned long flags;
- uint32_t ret;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- return ret;
}
+ trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -1070,11 +1076,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
@@ -1087,16 +1096,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
}
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type) {
+ r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (r)
+ return r;
+ break;
+ }
+ }
+ return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type)
+ return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ }
+ return true;
+
+}
+
const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
struct amdgpu_device *adev,
enum amd_ip_block_type type)
@@ -1136,10 +1182,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+ adev->enable_virtual_display = false;
+
+ if (amdgpu_virtual_display) {
+ struct drm_device *ddev = adev->ddev;
+ const char *pci_address_name = pci_name(ddev->pdev);
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+ pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+ pciaddstr_tmp = pciaddstr;
+ while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ if (!strcmp(pci_address_name, pciaddname)) {
+ adev->enable_virtual_display = true;
+ break;
+ }
+ }
+
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display);
+
+ kfree(pciaddstr);
+ }
+}
+
static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_whether_enable_virtual_display(adev);
+
switch (adev->asic_type) {
case CHIP_TOPAZ:
case CHIP_TONGA:
@@ -1209,6 +1283,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
}
}
+ adev->cg_flags &= amdgpu_cg_mask;
+ adev->pg_flags &= amdgpu_pg_mask;
+
return 0;
}
@@ -1440,9 +1517,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
adev->didt_wreg = &amdgpu_invalid_wreg;
+ adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+ adev->gc_cac_wreg = &amdgpu_invalid_wreg;
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
+
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1467,8 +1547,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->pcie_idx_lock);
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
+ spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
+ INIT_LIST_HEAD(&adev->shadow_list);
+ mutex_init(&adev->shadow_list_lock);
+
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
@@ -1511,17 +1595,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
/* Read BIOS */
- if (!amdgpu_get_bios(adev))
- return -EINVAL;
+ if (!amdgpu_get_bios(adev)) {
+ r = -EINVAL;
+ goto failed;
+ }
/* Must be an ATOMBIOS */
if (!adev->is_atom_bios) {
dev_err(adev->dev, "Expecting atombios for GPU\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- return r;
+ goto failed;
}
/* See if the asic supports SR-IOV */
@@ -1538,7 +1625,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
!(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
DRM_INFO("GPU not posted. posting now...\n");
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1548,7 +1636,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
@@ -1557,7 +1645,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- return r;
+ goto failed;
}
/* init the mode config */
@@ -1567,7 +1655,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_fini(adev);
- return r;
+ goto failed;
}
adev->accel_working = true;
@@ -1577,7 +1665,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- return r;
+ goto failed;
}
r = amdgpu_ib_ring_tests(adev);
@@ -1594,6 +1682,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_ERROR("registering register debugfs failed (%d).\n", r);
}
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r) {
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ return r;
+ }
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -1619,10 +1713,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- return r;
+ goto failed;
}
return 0;
+
+failed:
+ if (runtime)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ return r;
}
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1645,6 +1744,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
+ drm_crtc_force_disable_all(adev->ddev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_status);
@@ -1656,6 +1756,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
vga_switcheroo_unregister_client(adev->pdev);
+ if (adev->flags & AMD_IS_PX)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1841,7 +1943,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth++;
+#endif
drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+#endif
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
@@ -1851,6 +1969,126 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
return 0;
}
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+ int i;
+ bool asic_hang = false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].funcs->check_soft_reset)
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("IP block:%d is hang!\n", i);
+ asic_hang = true;
+ }
+ }
+ return asic_hang;
+}
+
+int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+ if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
+ DRM_INFO("Some block need full reset!\n");
+ return true;
+ }
+ return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->soft_reset) {
+ r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->post_soft_reset)
+ r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ return amdgpu_lockup_timeout > 0 ? true : false;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct fence **fence)
+{
+ uint32_t domain;
+ int r;
+
+ if (!bo->shadow)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (r)
+ return r;
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ /* if bo has been evicted, then no need to recover */
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+ NULL, fence, true);
+ if (r) {
+ DRM_ERROR("recover page table failed!\n");
+ goto err;
+ }
+ }
+err:
+ amdgpu_bo_unreserve(bo);
+ return r;
+}
+
/**
* amdgpu_gpu_reset - reset the asic
*
@@ -1861,69 +2099,123 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
- unsigned ring_sizes[AMDGPU_MAX_RINGS];
- uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
- bool saved = false;
-
int i, r;
int resched;
+ bool need_full_reset;
+
+ if (!amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+ return 0;
+ }
atomic_inc(&adev->gpu_reset_counter);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- r = amdgpu_suspend(adev);
-
+ /* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring)
continue;
+ kthread_park(ring->sched.thread);
+ amd_sched_hw_job_reset(&ring->sched);
+ }
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(adev);
+
+ need_full_reset = amdgpu_need_full_reset(adev);
- ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
- if (ring_sizes[i]) {
- saved = true;
- dev_info(adev->dev, "Saved %d dwords of commands "
- "on ring %d.\n", ring_sizes[i], i);
+ if (!need_full_reset) {
+ amdgpu_pre_soft_reset(adev);
+ r = amdgpu_soft_reset(adev);
+ amdgpu_post_soft_reset(adev);
+ if (r || amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
}
}
+ if (need_full_reset) {
+ /* save scratch */
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_suspend(adev);
+
retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ /* Disable fb access */
+ if (adev->mode_info.num_crtc) {
+ struct amdgpu_mode_mc_save save;
+ amdgpu_display_stop_mc_access(adev, &save);
+ amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ }
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume(adev);
- }
+ r = amdgpu_asic_reset(adev);
+ /* post card */
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (!r) {
+ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_resume(adev);
+ }
+ /* restore scratch */
+ amdgpu_atombios_scratch_regs_restore(adev);
+ }
if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+ if (r) {
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_suspend(adev);
+ need_full_reset = true;
+ goto retry;
+ }
+ /**
+ * recovery vm page tables, since we cannot depend on VRAM is
+ * consistent after gpu full reset.
+ */
+ if (need_full_reset && amdgpu_need_backup(adev)) {
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_bo *bo, *tmp;
+ struct fence *fence = NULL, *next = NULL;
+
+ DRM_INFO("recover vram bo from shadow\n");
+ mutex_lock(&adev->shadow_list_lock);
+ list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ if (fence) {
+ r = fence_wait(fence, false);
+ if (r) {
+ WARN(r, "recovery from shadow isn't comleted\n");
+ break;
+ }
+ }
+
+ fence_put(fence);
+ fence = next;
+ }
+ mutex_unlock(&adev->shadow_list_lock);
+ if (fence) {
+ r = fence_wait(fence, false);
+ if (r)
+ WARN(r, "recovery from shadow isn't comleted\n");
+ }
+ fence_put(fence);
+ }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
- amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
- ring_sizes[i] = 0;
- ring_data[i] = NULL;
- }
-
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- if (saved) {
- saved = false;
- r = amdgpu_suspend(adev);
- goto retry;
- }
+ amd_sched_job_recovery(&ring->sched);
+ kthread_unpark(ring->sched.thread);
}
} else {
- amdgpu_fence_driver_force_completion(adev);
+ dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i])
- kfree(ring_data[i]);
+ if (adev->rings[i]) {
+ kthread_unpark(adev->rings[i]->sched.thread);
+ }
}
}
@@ -1938,9 +2230,6 @@ retry:
return r;
}
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
@@ -2094,20 +2383,50 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= 0x3FFFF;
+
+ if (use_bank) {
+ if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+ se_bank >= adev->gfx.config.max_shader_engines)
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
if (*pos > adev->rmmio_size)
- return result;
+ goto end;
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto end;
+ }
result += 4;
buf += 4;
@@ -2115,6 +2434,15 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size -= 4;
}
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
@@ -2314,6 +2642,77 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
return result;
}
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 2;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2339,11 +2738,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2351,6 +2757,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
+ "amdgpu_gca_config",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2ef7e..9af8d3c7ae8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amdgpu_flip_work, cb);
fence_put(f);
- schedule_work(&work->flip_work);
+ schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
static void amdgpu_flip_work_func(struct work_struct *__work)
{
+ struct delayed_work *delayed_work =
+ container_of(__work, struct delayed_work, work);
struct amdgpu_flip_work *work =
- container_of(__work, struct amdgpu_flip_work, flip_work);
+ container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i, repcnt = 4;
- int vpos, hpos, stat, min_udelay = 0;
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ unsigned i;
+ int vpos, hpos;
if (amdgpu_flip_handle_fence(work, &work->excl))
return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
if (amdgpu_flip_handle_fence(work, &work->shared[i]))
return;
- /* We borrow the event spin lock for protecting flip_status */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- /* If this happens to execute within the "virtually extended" vblank
- * interval before the start of the real vblank interval then it needs
- * to delay programming the mmio flip until the real vblank is entered.
- * This prevents completing a flip too early due to the way we fudge
- * our vblank counter and vblank timestamps in order to work around the
- * problem that the hw fires vblank interrupts before actual start of
- * vblank (when line buffer refilling is done for a frame). It
- * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
- * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
- *
- * In practice this won't execute very often unless on very fast
- * machines because the time window for this to happen is very small.
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
*/
- while (amdgpuCrtc->enabled && --repcnt) {
- /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
- * start in hpos, and to the "fudged earlier" vblank start in
- * vpos.
- */
- stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
- GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode);
-
- if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
- !(vpos >= 0 && hpos <= 0))
- break;
-
- /* Sleep at least until estimated real start of hw vblank */
- min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
- if (min_udelay > vblank->framedur_ns / 2000) {
- /* Don't wait ridiculously long - something is wrong */
- repcnt = 0;
- break;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- usleep_range(min_udelay, 2 * min_udelay);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- };
+ if (amdgpuCrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(work->target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+ return;
+ }
- if (!repcnt)
- DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
- "framedur %d, linedur %d, stat %d, vpos %d, "
- "hpos %d\n", work->crtc_id, min_udelay,
- vblank->framedur_ns / 1000,
- vblank->linedur_ns / 1000, stat, vpos, hpos);
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+ INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
work->event = event;
@@ -220,31 +189,25 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto cleanup;
+ goto unreserve;
}
r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
DRM_ERROR("failed to get fences for buffer\n");
- goto cleanup;
+ goto unpin;
}
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
amdgpu_bo_unreserve(new_rbo);
work->base = base;
-
- r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup;
- }
+ work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
/* we borrow the event spin lock for protecting flip_wrok */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -252,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
- goto vblank_cleanup;
+ goto pflip_cleanup;
}
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -264,20 +227,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- amdgpu_flip_work_func(&work->flip_work);
+ amdgpu_flip_work_func(&work->flip_work.work);
return 0;
-vblank_cleanup:
- drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
-
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
+unpin:
if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
+unreserve:
amdgpu_bo_unreserve(new_rbo);
cleanup:
@@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
return ret;
}
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -374,6 +336,9 @@ static const char *encoder_names[38] = {
"TRAVIS",
"INTERNAL_VCE",
"INTERNAL_UNIPHY3",
+ "HDMI_ANX9805",
+ "INTERNAL_AMCLK",
+ "VIRTUAL",
};
static const char *hpd_names[6] = {
@@ -516,9 +481,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- if (amdgpu_fb->obj) {
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
- }
+ drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(amdgpu_fb);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f888c015f76c..7c911d0be2b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -52,9 +52,12 @@
* - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
* - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
* at the end of IBs.
+ * - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 2
+#define KMS_DRIVER_MINOR 5
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -82,8 +85,14 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,14 +169,32 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
#endif
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
@@ -324,7 +351,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
kfree(ap);
return 0;
@@ -413,7 +440,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (amdgpu_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
@@ -430,7 +460,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (amdgpu_is_atpx_hybrid() ||
+ !amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
@@ -515,7 +547,7 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
.dev_priv_size = 0,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
@@ -590,7 +622,6 @@ static int __init amdgpu_init(void)
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
- driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 919146780a15..bf033b58056c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -25,7 +25,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d1558768cfb7..0b109aebfec6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
- while (last_seq != seq) {
+ if (unlikely(seq == last_seq))
+ return;
+
+ last_seq &= drv->num_fences_mask;
+ seq &= drv->num_fences_mask;
+
+ do {
struct fence *fence, **ptr;
- ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+ ++last_seq;
+ last_seq &= drv->num_fences_mask;
+ ptr = &drv->fences[last_seq];
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
RCU_INIT_POINTER(*ptr, NULL);
- BUG_ON(!fence);
+ if (!fence)
+ continue;
r = fence_signal(fence);
if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
fence_put(fence);
- }
+ } while (last_seq != seq);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index 503d54098128..e73728d90388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -31,14 +31,6 @@
#define AMDGPU_GWS_SHIFT PAGE_SHIFT
#define AMDGPU_OA_SHIFT PAGE_SHIFT
-#define AMDGPU_PL_GDS TTM_PL_PRIV0
-#define AMDGPU_PL_GWS TTM_PL_PRIV1
-#define AMDGPU_PL_OA TTM_PL_PRIV2
-
-#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
-
struct amdgpu_ring;
struct amdgpu_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8fab6486064f..88fbed2389c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9f95da4f0536..a074edd95c70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
}
}
}
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+ unsigned se, sh, cu;
+ const char *p;
+
+ memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+ if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+ return;
+
+ p = amdgpu_disable_cu;
+ for (;;) {
+ char *next;
+ int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+ if (ret < 3) {
+ DRM_ERROR("amdgpu: could not parse disable_cu\n");
+ return;
+ }
+
+ if (se < max_se && sh < max_sh && cu < 16) {
+ DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+ mask[se * max_sh + sh] |= 1u << cu;
+ } else {
+ DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+ se, sh, cu);
+ }
+
+ next = strchr(p, ',');
+ if (!next)
+ break;
+ p = next + 1;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index dc06cbda7be6..51321e154c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,4 +27,6 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 31a676376d73..c93a92a840ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register hw i2c %s\n", name);
+ if (ret)
goto out_free;
- }
} else {
/* set the amdgpu bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b78e..f5810f700668 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -33,6 +33,8 @@
#include "amdgpu.h"
#include "atom.h"
+#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+
/*
* IB
* IBs (Indirect Buffers) and areas of GPU accessible memory where
@@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
- struct fence *hwf;
uint64_t ctx;
unsigned i;
@@ -141,7 +142,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if (!ring->ready) {
- dev_err(adev->dev, "couldn't schedule ib\n");
+ dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
@@ -160,10 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
- r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
+ r = amdgpu_vm_flush(ring, job);
if (r) {
amdgpu_ring_undo(ring);
return r;
@@ -193,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_hdp_invalidate)
amdgpu_ring_emit_hdp_invalidate(ring);
- r = amdgpu_fence_emit(ring, &hwf);
+ r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
@@ -203,17 +201,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
/* wrap the last IB with fence */
- if (job && job->uf_bo) {
- uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
- addr += job->uf_offset;
- amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
- if (f)
- *f = fence_get(hwf);
-
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
@@ -296,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
if (!ring || !ring->ready)
continue;
- r = amdgpu_ring_test_ib(ring);
+ r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
if (r) {
ring->ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 534fc04e80fd..5ebb3f43feb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
/* Allocate ring buffer */
if (adev->irq.ih.ring_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &adev->irq.ih.ring_obj);
+ r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->irq.ih.ring_obj,
+ &adev->irq.ih.gpu_addr,
+ (void **)&adev->irq.ih.ring);
if (r) {
DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->irq.ih.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
- (void **)&adev->irq.ih.ring);
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- if (r) {
- DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
- return r;
- }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 835a3fa8d8df..278708f5a744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+ int i, j;
+ for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+ struct amdgpu_irq_src *src = adev->irq.sources[i];
+ if (!src)
+ continue;
+ for (j = 0; j < src->num_types; j++)
+ amdgpu_irq_update(adev, src, j);
+ }
+}
+
/**
* amdgpu_irq_get - enable interrupt
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index e124b59f39c1..f016464035b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -70,6 +70,7 @@ struct amdgpu_irq {
/* gen irq stuff */
struct irq_domain *domain; /* GPU irq controller domain */
unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
+ uint32_t srbm_soft_reset;
};
void amdgpu_irq_preinstall(struct drm_device *dev);
@@ -94,6 +95,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f0dafa514fe4..6674d40eb3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,21 +28,15 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
- amd_sched_job_put(&job->base);
-}
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
- struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
-
- amd_sched_job_put(&job->base);
+ job->base.sched->name,
+ atomic_read(&job->ring->fence_drv.last_seq),
+ job->ring->fence_drv.sync_seq);
+ amdgpu_gpu_reset(job->adev);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
- INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
return r;
}
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- unsigned i;
struct fence *f;
+ unsigned i;
+
/* use sched fence if available */
- f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+ f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
- fence_put(job->fence);
+ amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
- amdgpu_bo_unref(&job->uf_bo);
- amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- if (!job->base.use_sched)
- kfree(job);
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ kfree(job);
}
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
{
- struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+ amdgpu_job_free_resources(job);
+
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
- struct fence *fence;
int r;
job->ring = ring;
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func, owner, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
job->owner = owner;
job->ctx = entity->fence_context;
- *f = fence_get(fence);
+ *f = fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->base,
- &job->vm_id, &job->vm_pd_addr);
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
@@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
}
job = to_amdgpu_job(sched_job);
- r = amdgpu_sync_wait(&job->sync);
- if (r) {
- DRM_ERROR("failed to sync wait (%d)\n", r);
- return NULL;
- }
+ BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, job, &fence);
- if (r) {
+ if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
- goto err;
- }
-err:
- job->fence = fence;
- amdgpu_job_free(job);
+ /* if gpu reset, hw fence will be replaced here */
+ fence_put(job->fence);
+ job->fence = fence_get(fence);
+ amdgpu_job_free_resources(job);
return fence;
}
const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
- .begin_job = amd_sched_job_begin,
- .finish_job = amd_sched_job_finish,
+ .timedout_job = amdgpu_job_timedout,
+ .free_job = amdgpu_job_free_cb
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d851ea15059f..b78e74048f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (amdgpu_device_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
amdgpu_amdkfd_device_fini(adev);
@@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
out:
- if (r)
+ if (r) {
+ /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+ if (adev->rmmio && amdgpu_device_is_px(dev))
+ pm_runtime_put_noidle(dev->dev);
amdgpu_driver_unload_kms(dev);
-
+ }
return r;
}
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+ struct drm_amdgpu_query_fw *query_fw,
+ struct amdgpu_device *adev)
+{
+ switch (query_fw->fw_type) {
+ case AMDGPU_INFO_FW_VCE:
+ fw_info->ver = adev->vce.fw_version;
+ fw_info->feature = adev->vce.fb_version;
+ break;
+ case AMDGPU_INFO_FW_UVD:
+ fw_info->ver = adev->uvd.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GMC:
+ fw_info->ver = adev->mc.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GFX_ME:
+ fw_info->ver = adev->gfx.me_fw_version;
+ fw_info->feature = adev->gfx.me_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_PFP:
+ fw_info->ver = adev->gfx.pfp_fw_version;
+ fw_info->feature = adev->gfx.pfp_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_CE:
+ fw_info->ver = adev->gfx.ce_fw_version;
+ fw_info->feature = adev->gfx.ce_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC:
+ fw_info->ver = adev->gfx.rlc_fw_version;
+ fw_info->feature = adev->gfx.rlc_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_MEC:
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->gfx.mec_fw_version;
+ fw_info->feature = adev->gfx.mec_feature_version;
+ } else if (query_fw->index == 1) {
+ fw_info->ver = adev->gfx.mec2_fw_version;
+ fw_info->feature = adev->gfx.mec2_feature_version;
+ } else
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_FW_SMC:
+ fw_info->ver = adev->pm.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_SDMA:
+ if (query_fw->index >= adev->sdma.num_instances)
+ return -EINVAL;
+ fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+ fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* Userspace get information ioctl
*/
@@ -227,14 +292,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
type = AMD_IP_BLOCK_TYPE_UVD;
ring_mask = adev->uvd.ring.ready ? 1 : 0;
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 1;
break;
default:
return -EINVAL;
@@ -288,73 +353,29 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_TIMESTAMP:
- ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+ ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_FW_VERSION: {
struct drm_amdgpu_info_firmware fw_info;
+ int ret;
/* We only support one instance of each IP block right now. */
if (info->query_fw.ip_instance != 0)
return -EINVAL;
- switch (info->query_fw.fw_type) {
- case AMDGPU_INFO_FW_VCE:
- fw_info.ver = adev->vce.fw_version;
- fw_info.feature = adev->vce.fb_version;
- break;
- case AMDGPU_INFO_FW_UVD:
- fw_info.ver = adev->uvd.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GMC:
- fw_info.ver = adev->mc.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GFX_ME:
- fw_info.ver = adev->gfx.me_fw_version;
- fw_info.feature = adev->gfx.me_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_PFP:
- fw_info.ver = adev->gfx.pfp_fw_version;
- fw_info.feature = adev->gfx.pfp_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_CE:
- fw_info.ver = adev->gfx.ce_fw_version;
- fw_info.feature = adev->gfx.ce_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_RLC:
- fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = adev->gfx.rlc_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0) {
- fw_info.ver = adev->gfx.mec_fw_version;
- fw_info.feature = adev->gfx.mec_feature_version;
- } else if (info->query_fw.index == 1) {
- fw_info.ver = adev->gfx.mec2_fw_version;
- fw_info.feature = adev->gfx.mec2_feature_version;
- } else
- return -EINVAL;
- break;
- case AMDGPU_INFO_FW_SMC:
- fw_info.ver = adev->pm.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_SDMA:
- if (info->query_fw.index >= adev->sdma.num_instances)
- return -EINVAL;
- fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
- fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
- break;
- default:
- return -EINVAL;
- }
+ ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+ if (ret)
+ return ret;
+
return copy_to_user(out, &fw_info,
min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
}
case AMDGPU_INFO_NUM_BYTES_MOVED:
ui64 = atomic64_read(&adev->num_bytes_moved);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_NUM_EVICTIONS:
+ ui64 = atomic64_read(&adev->num_evictions);
+ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
ui64 = atomic64_read(&adev->vram_usage);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -566,6 +587,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_uvd_free_handles(adev, file_priv);
+ amdgpu_vce_free_handles(adev, file_priv);
+
amdgpu_vm_fini(adev, &fpriv->vm);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -590,10 +614,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
-
- amdgpu_uvd_free_handles(adev, file_priv);
- amdgpu_vce_free_handles(adev, file_priv);
}
/*
@@ -756,3 +776,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_amdgpu_info_firmware fw_info;
+ struct drm_amdgpu_query_fw query_fw;
+ int ret, i;
+
+ /* VCE */
+ query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* UVD */
+ query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* GMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* ME */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* PFP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* CE */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+ query_fw.index = 0;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC2 */
+ if (adev->asic_type == CHIP_KAVERI ||
+ (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+ query_fw.index = 1;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+ }
+
+ /* SMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* SDMA */
+ query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, fw_info.feature, fw_info.ver);
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_firmware_info_list[] = {
+ {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
+ ARRAY_SIZE(amdgpu_firmware_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6b1d7d306564..7b0eff7d060b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -39,6 +39,8 @@
#include <drm/drm_plane_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
struct amdgpu_bo;
struct amdgpu_device;
@@ -339,6 +341,8 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
void amdgpu_print_display_setup(struct drm_device *dev);
int amdgpu_modeset_create_props(struct amdgpu_device *adev);
int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7ecea83ce453..b17734e0ecc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -44,14 +44,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev);
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
struct ttm_mem_reg *mem)
{
- u64 ret = 0;
- if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
- ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
- }
- return ret;
+ if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+ return 0;
+
+ return ((mem->start << PAGE_SHIFT) + mem->size) >
+ adev->mc.visible_vram_size ?
+ adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+ mem->size;
}
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +98,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
+ if (!list_empty(&bo->shadow_list)) {
+ mutex_lock(&bo->adev->shadow_list_lock);
+ list_del_init(&bo->shadow_list);
+ mutex_unlock(&bo->adev->shadow_list_lock);
+ }
kfree(bo->metadata);
kfree(bo);
}
@@ -112,84 +116,93 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
struct ttm_placement *placement,
- struct ttm_place *placements,
+ struct ttm_place *places,
u32 domain, u64 flags)
{
- u32 c = 0, i;
-
- placement->placement = placements;
- placement->busy_placement = placements;
+ u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
- adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- placements[c].fpfn =
- adev->mc.visible_vram_size >> PAGE_SHIFT;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+ !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+ places[c].fpfn = visible_pfn;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_TOPDOWN;
+ c++;
}
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
- placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ places[c].lpfn = visible_pfn;
+ else
+ places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_TT;
+ if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
- } else {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
- }
+ else
+ places[c].flags |= TTM_PL_FLAG_CACHED;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_SYSTEM;
+ if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
- } else {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
- }
+ else
+ places[c].flags |= TTM_PL_FLAG_CACHED;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_GDS;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ c++;
}
+
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_GWS;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ c++;
}
+
if (domain & AMDGPU_GEM_DOMAIN_OA) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_OA;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ c++;
}
if (!c) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ c++;
}
+
placement->num_placement = c;
- placement->num_busy_placement = c;
+ placement->placement = places;
- for (i = 0; i < c; i++) {
- if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- (placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !placements[i].fpfn)
- placements[i].lpfn =
- adev->mc.visible_vram_size >> PAGE_SHIFT;
- else
- placements[i].lpfn = 0;
- }
+ placement->num_busy_placement = c;
+ placement->busy_placement = places;
}
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
@@ -211,6 +224,69 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
bo->placement.busy_placement = bo->placements;
}
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+{
+ int r;
+
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(*bo_ptr, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+ goto error_free;
+ }
+
+ r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+ goto error_unreserve;
+ }
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+
+error_unreserve:
+ amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+ amdgpu_bo_unref(bo_ptr);
+
+ return r;
+}
+
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -250,6 +326,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
}
bo->adev = adev;
INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +354,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
if (unlikely(r != 0)) {
return r;
}
+
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ struct fence *fence;
+
+ if (adev->mman.buffer_funcs_ring == NULL ||
+ !adev->mman.buffer_funcs_ring->ready) {
+ r = -EBUSY;
+ goto fail_free;
+ }
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ goto fail_free;
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ goto fail_unreserve;
+
+ amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ amdgpu_bo_fence(bo, fence, false);
+ amdgpu_bo_unreserve(bo);
+ fence_put(bo->tbo.moving);
+ bo->tbo.moving = fence_get(fence);
+ fence_put(fence);
+ }
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
return 0;
+
+fail_unreserve:
+ amdgpu_bo_unreserve(bo);
+fail_free:
+ amdgpu_bo_unref(&bo);
+ return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ struct amdgpu_bo *bo)
+{
+ struct ttm_placement placement = {0};
+ struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ int r;
+
+ if (bo->shadow)
+ return 0;
+
+ bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+ memset(&placements, 0,
+ (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+ amdgpu_ttm_placement_init(adev, &placement,
+ placements, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ NULL, &placement,
+ bo->tbo.resv,
+ &bo->shadow);
+ if (!r) {
+ bo->shadow->parent = amdgpu_bo_ref(bo);
+ mutex_lock(&adev->shadow_list_lock);
+ list_add_tail(&bo->shadow_list, &adev->shadow_list);
+ mutex_unlock(&adev->shadow_list_lock);
+ }
+
+ return r;
}
int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +438,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ int r;
memset(&placements, 0,
(AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +446,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
placements, domain, flags);
- return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
- domain, flags, sg, &placement,
- resv, bo_ptr);
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+ domain, flags, sg, &placement,
+ resv, bo_ptr);
+ if (r)
+ return r;
+
+ if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+ r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+ if (r)
+ amdgpu_bo_unref(bo_ptr);
+ }
+
+ return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct)
+
+{
+ struct amdgpu_bo *shadow = bo->shadow;
+ uint64_t bo_addr, shadow_addr;
+ int r;
+
+ if (!shadow)
+ return -EINVAL;
+
+ bo_addr = amdgpu_bo_gpu_offset(bo);
+ shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ goto err;
+
+ r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+ amdgpu_bo_size(bo), resv, fence,
+ direct);
+ if (!r)
+ amdgpu_bo_fence(bo, *fence, true);
+
+err:
+ return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct)
+
+{
+ struct amdgpu_bo *shadow = bo->shadow;
+ uint64_t bo_addr, shadow_addr;
+ int r;
+
+ if (!shadow)
+ return -EINVAL;
+
+ bo_addr = amdgpu_bo_gpu_offset(bo);
+ shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ goto err;
+
+ r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+ amdgpu_bo_size(bo), resv, fence,
+ direct);
+ if (!r)
+ amdgpu_bo_fence(bo, *fence, true);
+
+err:
+ return r;
}
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -380,16 +600,17 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
if (bo->pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+
+ if (domain != amdgpu_mem_type_to_domain(mem_type))
+ return -EINVAL;
+
bo->pin_count++;
if (gpu_addr)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (max_offset != 0) {
- u64 domain_start;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
- domain_start = bo->adev->mc.vram_start;
- else
- domain_start = bo->adev->mc.gtt_start;
+ u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
WARN_ON_ONCE(max_offset <
(amdgpu_bo_gpu_offset(bo) - domain_start));
}
@@ -401,7 +622,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+ (!max_offset || max_offset >
+ bo->adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
bo->adev->mc.visible_vram_size))
return -EINVAL;
@@ -420,19 +642,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
- } else
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
- } else {
+ if (unlikely(r)) {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ goto error;
+ }
+
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ bo->adev->gart_pin_size += amdgpu_bo_size(bo);
}
+
+error:
return r;
}
@@ -457,16 +683,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
- } else
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
- } else {
+ if (unlikely(r)) {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ goto error;
+ }
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else {
+ bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
}
+
+error:
return r;
}
@@ -589,6 +819,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_bo *rbo;
+ struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
@@ -602,6 +833,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
/* move_notify is called before move happens */
amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+
+ trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -634,7 +867,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
for (i = 0; i < abo->placement.num_placement; i++) {
/* Force into visible VRAM */
if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+ (!abo->placements[i].lpfn ||
+ abo->placements[i].lpfn > lpfn))
abo->placements[i].lpfn = lpfn;
}
r = ttm_bo_validate(bo, &abo->placement, false, false);
@@ -671,3 +905,21 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
else
reservation_object_add_excl_fence(resv, fence);
}
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function, it might be useful to add check for this for debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+ !bo->pin_count);
+
+ return bo->tbo.offset;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bdb01d932548..b6a27390ef88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -85,21 +85,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
ttm_bo_unreserve(&bo->tbo);
}
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo: amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- return bo->tbo.offset;
-}
-
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
return bo->tbo.num_pages << PAGE_SHIFT;
@@ -139,6 +124,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct ttm_placement *placement,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
@@ -165,6 +154,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct);
+
/*
* sub allocation
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 0e13d80d2a95..d4ec3cb187a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size, i;
+ int size;
if (adev->pp_enabled)
size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
- for (i = 0; i < size; i++) {
- sprintf(buf + i, "%02x", table[i]);
- }
- sprintf(buf + i, "\n");
+ memcpy(buf, table, size);
return size;
}
@@ -347,6 +344,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
return size;
}
@@ -363,7 +362,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -377,6 +378,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@@ -391,6 +394,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
return size;
}
@@ -407,7 +412,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -421,6 +428,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@@ -435,6 +444,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
return size;
}
@@ -451,7 +462,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -465,6 +478,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_sclk_od(adev);
+ else if (adev->pm.funcs->get_sclk_od)
+ value = adev->pm.funcs->get_sclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_sclk_od) {
+ adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_mclk_od(adev);
+ else if (adev->pm.funcs->get_mclk_od)
+ value = adev->pm.funcs->get_mclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_mclk_od) {
+ adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
fail:
return count;
}
@@ -490,6 +597,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
amdgpu_get_pp_dpm_pcie,
amdgpu_set_pp_dpm_pcie);
+static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_sclk_od,
+ amdgpu_set_pp_sclk_od);
+static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_mclk_od,
+ amdgpu_set_pp_mclk_od);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -990,54 +1103,46 @@ force:
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled)
+ if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+ /* enable/disable UVD */
+ mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
- else {
- if (adev->pm.funcs->powergate_uvd) {
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
mutex_lock(&adev->pm.mutex);
- /* enable/disable UVD */
- amdgpu_dpm_powergate_uvd(adev, !enable);
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
mutex_unlock(&adev->pm.mutex);
} else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.uvd_active = false;
+ mutex_unlock(&adev->pm.mutex);
}
-
+ amdgpu_pm_compute_clocks(adev);
}
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled)
+ if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+ /* enable/disable VCE */
+ mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
- else {
- if (adev->pm.funcs->powergate_vce) {
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.vce_active = false;
+ mutex_unlock(&adev->pm.mutex);
}
+ amdgpu_pm_compute_clocks(adev);
}
}
@@ -1108,22 +1213,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_table\n");
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
}
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_sclk_od\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_mclk_od\n");
+ return ret;
+ }
+
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1146,10 +1263,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
}
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
+ device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 82256558e0f5..545074479e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -52,6 +52,9 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
+ pp_init->rev_id = adev->pdev->revision;
+ pp_init->sub_sys_id = adev->pdev->subsystem_device;
+ pp_init->sub_vendor_id = adev->pdev->subsystem_vendor;
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
@@ -105,11 +108,10 @@ static int amdgpu_pp_early_init(void *handle)
break;
case CHIP_TONGA:
case CHIP_FIJI:
- adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
- break;
+ case CHIP_TOPAZ:
case CHIP_CARRIZO:
case CHIP_STONEY:
- adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+ adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
break;
/* These chips don't have powerplay implemenations */
case CHIP_BONAIRE:
@@ -117,7 +119,6 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
- case CHIP_TOPAZ:
default:
adev->pp_enabled = false;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 870f9494252c..242ba04bfde6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -28,6 +28,7 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -48,6 +49,7 @@
*/
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -73,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+
return 0;
}
@@ -125,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
mb();
amdgpu_ring_set_wptr(ring);
+
+ if (ring->funcs->end_use)
+ ring->funcs->end_use(ring);
}
/**
@@ -137,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
ring->wptr = ring->wptr_old;
-}
-
-/**
- * amdgpu_ring_backup - Back up the content of a ring
- *
- * @ring: the ring we want to back up
- *
- * Saves all unprocessed commits from a ring, returns the number of dwords saved.
- */
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data)
-{
- unsigned size, ptr, i;
-
- *data = NULL;
-
- if (ring->ring_obj == NULL)
- return 0;
-
- /* it doesn't make sense to save anything if all fences are signaled */
- if (!amdgpu_fence_count_emitted(ring))
- return 0;
-
- ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- size = ring->wptr + (ring->ring_size / 4);
- size -= ptr;
- size &= ring->ptr_mask;
- if (size == 0)
- return 0;
-
- /* and then save the content of the ring */
- *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
- if (!*data)
- return 0;
- for (i = 0; i < size; ++i) {
- (*data)[i] = ring->ring[ptr++];
- ptr &= ring->ptr_mask;
- }
- return size;
-}
-
-/**
- * amdgpu_ring_restore - append saved commands to the ring again
- *
- * @ring: ring to append commands to
- * @size: number of dwords we want to write
- * @data: saved commands
- *
- * Allocates space on the ring and restore the previously saved commands.
- */
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data)
-{
- int i, r;
-
- if (!size || !data)
- return 0;
-
- /* restore the saved ring content */
- r = amdgpu_ring_alloc(ring, size);
- if (r)
- return r;
-
- for (i = 0; i < size; ++i) {
- amdgpu_ring_write(ring, data[i]);
- }
-
- amdgpu_ring_commit(ring);
- kfree(data);
- return 0;
+ if (ring->funcs->end_use)
+ ring->funcs->end_use(ring);
}
/**
@@ -260,14 +200,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
- return r;
- }
- ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
- ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@ -276,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
- spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r);
@@ -291,30 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &ring->ring_obj);
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(ring->ring_obj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
- &ring->gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(ring->ring_obj);
- dev_err(adev->dev, "(%d) ring pin failed\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(ring->ring_obj,
- (void **)&ring->ring);
- amdgpu_bo_unreserve(ring->ring_obj);
- if (r) {
- dev_err(adev->dev, "(%d) ring map failed\n", r);
- return r;
- }
+ memset((void *)ring->ring, 0, ring->ring_size);
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->max_dw = max_dw;
@@ -347,7 +264,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
if (ring_obj) {
r = amdgpu_bo_reserve(ring_obj, false);
@@ -358,6 +274,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
}
amdgpu_bo_unref(&ring_obj);
}
+ amdgpu_debugfs_ring_fini(ring);
}
/*
@@ -365,57 +282,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+/* Layout of file is 12 bytes consisting of
+ * - rptr
+ * - wptr
+ * - driver's copy of wptr
+ *
+ * followed by n-words of ring data
+ */
+static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int roffset = (unsigned long)node->info_ent->data;
- struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
- uint32_t rptr, wptr, rptr_next;
- unsigned i;
-
- wptr = amdgpu_ring_get_wptr(ring);
- seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
-
- rptr = amdgpu_ring_get_rptr(ring);
- rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
-
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
- ring->wptr, ring->wptr);
-
- if (!ring->ready)
- return 0;
-
- /* print 8 dw before current rptr as often it's the last executed
- * packet that is the root issue
- */
- i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- while (i != rptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+ struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+ int r, i;
+ uint32_t value, result, early[3];
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ result = 0;
+
+ if (*pos < 12) {
+ early[0] = amdgpu_ring_get_rptr(ring);
+ early[1] = amdgpu_ring_get_wptr(ring);
+ early[2] = ring->wptr;
+ for (i = *pos / 4; i < 3 && size; i++) {
+ r = put_user(early[i], (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
}
- while (i != wptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
+
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t*)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
}
- return 0;
+
+ return result;
}
-static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
-static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_ring_read,
+ .llseek = default_llseek
+};
#endif
@@ -423,28 +345,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
- unsigned i;
- struct drm_info_list *info;
- char *name;
-
- for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
- info = &amdgpu_debugfs_ring_info_list[i];
- if (!info->data)
- break;
- }
-
- if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
- return -ENOSPC;
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ char name[32];
- name = &amdgpu_debugfs_ring_names[i][0];
sprintf(name, "amdgpu_ring_%s", ring->name);
- info->name = name;
- info->show = amdgpu_debugfs_ring_info;
- info->driver_features = 0;
- info->data = (void*)(uintptr_t)offset;
- return amdgpu_debugfs_add_files(adev, info, 1);
+ ent = debugfs_create_file(name,
+ S_IFREG | S_IRUGO, root,
+ ring, &amdgpu_debugfs_ring_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ i_size_write(ent->d_inode, ring->ring_size + 12);
+ ring->ent = ent;
#endif
return 0;
}
+
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove(ring->ent);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 48618ee324eb..d8af37a845f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %d",
+ seq_printf(m, " protected by 0x%08x on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 34a92808bbd4..5c8d3022fb87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
}
/**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
*
* @sync: the sync object
+ * @ring: optional ring to use for test
*
- * Returns true if all fences in the sync object are signaled.
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
*/
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
@@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+ if (ring && s_fence) {
+ /* For fences from the same ring it is sufficient
+ * when they are scheduled.
+ */
+ if (s_fence->sched == &ring->sched) {
+ if (fence_is_signaled(&s_fence->scheduled))
+ continue;
+
+ return &s_fence->scheduled;
+ }
+ }
if (fence_is_signaled(f)) {
hash_del(&e->node);
@@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
continue;
}
- return false;
+ return f;
}
- return true;
+ return NULL;
}
/**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ * amdgpu_sync_get_fence - get the next fence from the sync object
*
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
+ * @sync: sync object to use
*
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
+ * Get and removes the next fence from the sync object not signaled yet.
*/
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence)
-{
- struct amdgpu_sync_entry *e, *newone;
- struct hlist_node *tmp;
- int i;
-
- /* Allocate the new entry before moving the old ones */
- newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
- if (!newone)
- return -ENOMEM;
-
- hash_for_each_safe(src->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
-
- hash_del(&e->node);
- if (fence_is_signaled(f)) {
- fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- if (amdgpu_sync_add_later(dst, f)) {
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- hash_add(dst->fences, &e->node, f->context);
- }
-
- hash_add(src->fences, &newone->node, fence->context);
- newone->fence = fence_get(fence);
-
- return 0;
-}
-
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
@@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
return NULL;
}
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
-{
- struct amdgpu_sync_entry *e;
- struct hlist_node *tmp;
- int i, r;
-
- hash_for_each_safe(sync->fences, i, tmp, e, node) {
- r = fence_wait(e->fence, false);
- if (r)
- return r;
-
- hash_del(&e->node);
- fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
- }
-
- return 0;
-}
-
/**
* amdgpu_sync_free - free the sync object
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4fc334..b827c75e95de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- size, NULL, &fence);
+ size, NULL, &fence, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
- size, NULL, &fence);
+ size, NULL, &fence, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 26a5f4acf584..0d8d65eb46cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -11,19 +11,68 @@
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace
+TRACE_EVENT(amdgpu_mm_rreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_mm_wreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
TRACE_EVENT(amdgpu_bo_create,
TP_PROTO(struct amdgpu_bo *bo),
TP_ARGS(bo),
TP_STRUCT__entry(
__field(struct amdgpu_bo *, bo)
__field(u32, pages)
+ __field(u32, type)
+ __field(u32, prefer)
+ __field(u32, allow)
+ __field(u32, visible)
),
TP_fast_assign(
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
+ __entry->type = bo->tbo.mem.mem_type;
+ __entry->prefer = bo->prefered_domains;
+ __entry->allow = bo->allowed_domains;
+ __entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+
+ TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
+ __entry->bo, __entry->pages, __entry->type,
+ __entry->prefer, __entry->allow, __entry->visible)
);
TRACE_EVENT(amdgpu_cs,
@@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
- TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
- uint64_t pd_addr),
- TP_ARGS(vm, ring, vmid, pd_addr),
+ TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+ TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, ring)
__field(u32, vmid)
__field(u64, pd_addr)
+ __field(u32, needs_flush)
),
TP_fast_assign(
__entry->vm = vm;
__entry->ring = ring;
- __entry->vmid = vmid;
- __entry->pd_addr = pd_addr;
+ __entry->vmid = job->vm_id;
+ __entry->pd_addr = job->vm_pd_addr;
+ __entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
- __entry->ring, __entry->vmid, __entry->pd_addr)
+ TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->vm, __entry->ring, __entry->vmid,
+ __entry->pd_addr, __entry->needs_flush)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set,
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, list)
__field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
),
TP_fast_assign(
__entry->list = list;
__entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
),
- TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+ TP_printk("list=%p, bo=%p, bo_size = %Ld",
+ __entry->list,
+ __entry->bo,
+ __entry->bo_size)
+);
+
+TRACE_EVENT(amdgpu_cs_bo_status,
+ TP_PROTO(uint64_t total_bo, uint64_t total_size),
+ TP_ARGS(total_bo, total_size),
+ TP_STRUCT__entry(
+ __field(u64, total_bo)
+ __field(u64, total_size)
+ ),
+
+ TP_fast_assign(
+ __entry->total_bo = total_bo;
+ __entry->total_size = total_size;
+ ),
+ TP_printk("total bo size = %Ld, total bo count = %Ld",
+ __entry->total_bo, __entry->total_size)
+);
+
+TRACE_EVENT(amdgpu_ttm_bo_move,
+ TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+ TP_ARGS(bo, new_placement, old_placement),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ ),
+ TP_printk("bo=%p from:%d to %d with size = %Ld",
+ __entry->bo, __entry->old_placement,
+ __entry->new_placement, __entry->bo_size)
);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b9053af4762..5447973483ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,6 +34,7 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
@@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
ttm_mem_global_release(ref->object);
}
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
@@ -256,10 +257,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
- old_start += adev->mc.vram_start;
- break;
case TTM_PL_TT:
- old_start += adev->mc.gtt_start;
+ old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -267,10 +266,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
}
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
- new_start += adev->mc.vram_start;
- break;
case TTM_PL_TT:
- new_start += adev->mc.gtt_start;
+ new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -285,10 +282,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence);
- /* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, fence,
- evict, no_wait_gpu, new_mem);
+ bo->resv, &fence, false);
+ if (r)
+ return r;
+
+ r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
fence_put(fence);
return r;
}
@@ -334,7 +332,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -367,7 +365,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -396,6 +394,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
return -EINVAL;
adev = amdgpu_get_adev(bo->bdev);
+
+ /* remember the eviction */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
return 0;
@@ -429,7 +432,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
if (r) {
return r;
}
@@ -943,6 +946,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
+ while ((++lru)->lru[tbo->mem.mem_type] == res)
+ lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
@@ -953,6 +958,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
+ while ((++lru)->swap_lru == res)
+ lru->swap_lru = &tbo->swap;
return res;
}
@@ -980,10 +987,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
unsigned i, j;
int r;
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
@@ -1004,6 +1007,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ adev->mman.guard.lru[j] = NULL;
+ adev->mman.guard.swap_lru = NULL;
+
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1144,7 +1151,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence)
+ struct fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1188,8 +1195,79 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
+ if (direct_submit) {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+ NULL, NULL, fence);
+ job->fence = fence_get(*fence);
+ if (r)
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ if (r)
+ goto error_free;
+ }
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct fence **fence)
+{
+ struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ uint32_t max_bytes, byte_count;
+ uint64_t dst_offset;
+ unsigned int num_loops, num_dw;
+ unsigned int i;
+ int r;
+
+ byte_count = bo->tbo.num_pages << PAGE_SHIFT;
+ max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+ num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* for IB padding */
+ while (num_dw & 0x7)
+ num_dw++;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ if (r)
+ return r;
+
+ if (resv) {
+ r = amdgpu_sync_resv(adev, &job->sync, resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ DRM_ERROR("sync failed (%d).\n", r);
+ goto error_free;
+ }
+ }
+
+ dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
+ for (i = 0; i < num_loops; i++) {
+ uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+ dst_offset, cur_size_in_bytes);
+
+ dst_offset += cur_size_in_bytes;
+ byte_count -= cur_size_in_bytes;
+ }
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
@@ -1380,3 +1458,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
#endif
}
+
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
+{
+ return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
new file mode 100644
index 000000000000..72f6bfc15d8f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_TTM_H__
+#define __AMDGPU_TTM_H__
+
+#include "gpu_scheduler.h"
+
+#define AMDGPU_PL_GDS TTM_PL_PRIV0
+#define AMDGPU_PL_GWS TTM_PL_PRIV1
+#define AMDGPU_PL_OA TTM_PL_PRIV2
+
+#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
+#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
+#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
+
+#define AMDGPU_TTM_LRU_SIZE 20
+
+struct amdgpu_mman_lru {
+ struct list_head *lru[TTM_NUM_MEM_TYPES];
+ struct list_head *swap_lru;
+};
+
+struct amdgpu_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *vram;
+ struct dentry *gtt;
+#endif
+
+ /* buffer handling */
+ const struct amdgpu_buffer_funcs *buffer_funcs;
+ struct amdgpu_ring *buffer_funcs_ring;
+ /* Scheduler entity for buffer moves */
+ struct amd_sched_entity entity;
+
+ /* custom LRU management */
+ struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+ /* guard for log2_size array, don't add anything in between */
+ struct amdgpu_mman_lru guard;
+};
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count,
+ struct reservation_object *resv,
+ struct fence **fence, bool direct_submit);
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct fence **fence);
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d9c88d13f8db..cc766cc53a87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -40,9 +40,16 @@
#include "uvd/uvd_4_2_d.h"
/* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS 1000
+#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+/* Firmware versions for VI */
+#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
+#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
+#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
+#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
+
/* Polaris10/11 firmware version */
-#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
+#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
@@ -195,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
- r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, &adev->uvd.vcpu_bo);
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+ &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (r) {
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->uvd.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) UVD map failed\n", r);
- return r;
- }
-
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-
ring = &adev->uvd.ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
@@ -246,6 +227,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
+ break;
+ case CHIP_CARRIZO:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
+ break;
+ case CHIP_FIJI:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
+ break;
+ case CHIP_STONEY:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
+ break;
+ default:
+ adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
+ }
+
return 0;
}
@@ -300,7 +298,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.saved_bo)
return -ENOMEM;
- memcpy(adev->uvd.saved_bo, ptr, size);
+ memcpy_fromio(adev->uvd.saved_bo, ptr, size);
return 0;
}
@@ -317,7 +315,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.cpu_addr;
if (adev->uvd.saved_bo != NULL) {
- memcpy(ptr, adev->uvd.saved_bo, size);
+ memcpy_toio(ptr, adev->uvd.saved_bo, size);
kfree(adev->uvd.saved_bo);
adev->uvd.saved_bo = NULL;
} else {
@@ -326,11 +324,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- (adev->uvd.fw->size) - offset);
+ memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
- memset(ptr, 0, size);
+ memset_io(ptr, 0, size);
}
return 0;
@@ -346,8 +344,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
- amdgpu_uvd_note_usage(adev);
-
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
if (r) {
@@ -438,7 +434,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
unsigned fs_in_mb = width_in_mb * height_in_mb;
unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
- unsigned min_ctx_size = 0;
+ unsigned min_ctx_size = ~0;
image_size = width * height;
image_size += image_size / 2;
@@ -557,7 +553,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
- if (adev->asic_type < CHIP_POLARIS10){
+ if (!adev->uvd.use_ctx_buf){
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -662,7 +658,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
}
DRM_ERROR("No more free UVD handles!\n");
- return -EINVAL;
+ return -ENOSPC;
case 1:
/* it's a decode msg, calc buffer sizes */
@@ -822,6 +818,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
return r;
break;
case mmUVD_ENGINE_CNTL:
+ case mmUVD_NO_OP:
break;
default:
DRM_ERROR("Invalid reg 0x%X!\n", reg);
@@ -913,8 +910,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- amdgpu_uvd_note_usage(ctx.parser->adev);
-
return 0;
}
@@ -962,13 +957,15 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->ptr[3] = addr >> 32;
ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
ib->ptr[5] = 0;
- for (i = 6; i < 16; ++i)
- ib->ptr[i] = PACKET2(0);
+ for (i = 6; i < 16; i += 2) {
+ ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
+ ib->ptr[i+1] = 0;
+ }
ib->length_dw = 16;
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err_free;
@@ -1095,35 +1092,23 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work);
- unsigned i, fences, handles = 0;
-
- fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- ++handles;
+ unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
- if (fences == 0 && handles == 0) {
+ if (fences == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
- /* just work around for uvd clock remain high even
- * when uvd dpm disabled on Polaris10 */
- if (adev->asic_type == CHIP_POLARIS10)
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}
} else {
- schedule_delayed_work(&adev->uvd.idle_work,
- msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
}
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
- set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
- msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
if (set_clocks) {
if (adev->pm.dpm_enabled) {
@@ -1133,3 +1118,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
}
}
}
+
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+}
+
+/**
+ * amdgpu_uvd_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct fence *fence;
+ long r;
+
+ r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ goto error;
+ }
+
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+ }
+
+ r = fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+
+error:
+ fence_put(fence);
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 9a3b449081a7..c850009602d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 875626a2eccb..da52af2a935a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -36,7 +36,7 @@
#include "cikd.h"
/* 1 second timeout */
-#define VCE_IDLE_TIMEOUT_MS 1000
+#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
unsigned ucode_version, version_major, version_minor, binary_id;
int i, r;
- INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
-
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
@@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
adev->vce.filp[i] = NULL;
}
+ INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
+ mutex_init(&adev->vce.idle_mutex);
+
return 0;
}
@@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->vce.ring[1]);
release_firmware(adev->vce.fw);
+ mutex_destroy(&adev->vce.idle_mutex);
return 0;
}
@@ -280,8 +282,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(cpu_addr, (adev->vce.fw->data) + offset,
- (adev->vce.fw->size) - offset);
+ memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ adev->vce.fw->size - offset);
amdgpu_bo_kunmap(adev->vce.vcpu_bo);
@@ -310,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_asic_set_vce_clocks(adev, 0, 0);
}
} else {
- schedule_delayed_work(&adev->vce.idle_work,
- msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+ schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
}
/**
- * amdgpu_vce_note_usage - power up VCE
+ * amdgpu_vce_ring_begin_use - power up VCE
*
- * @adev: amdgpu_device pointer
+ * @ring: amdgpu ring
*
* Make sure VCE is powerd up when we want to use it
*/
-static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
- bool streams_changed = false;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
- set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
- msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
-
- if (adev->pm.dpm_enabled) {
- /* XXX figure out if the streams changed */
- streams_changed = false;
- }
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks;
- if (set_clocks || streams_changed) {
+ mutex_lock(&adev->vce.idle_mutex);
+ set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
+ if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
}
}
+ mutex_unlock(&adev->vce.idle_mutex);
+}
+
+/**
+ * amdgpu_vce_ring_end_use - power VCE down
+ *
+ * @ring: amdgpu ring
+ *
+ * Schedule work to power VCE down again
+ */
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
/**
@@ -357,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
int i, r;
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
uint32_t handle = atomic_read(&adev->vce.handles[i]);
+
if (!handle || adev->vce.filp[i] != filp)
continue;
- amdgpu_vce_note_usage(adev);
-
r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
if (r)
DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
@@ -437,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err;
@@ -469,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
- uint64_t dummy;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -477,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
/* stitch together an VCE destroy msg */
ib->length_dw = 0;
@@ -485,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = 0x00000014; /* len */
- ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
- ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000020; /* len */
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
+ ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
+ ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008; /* len */
ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
@@ -499,7 +508,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err;
@@ -580,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
* we we don't have another free session index.
*/
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
- uint32_t handle, bool *allocated)
+ uint32_t handle, uint32_t *allocated)
{
unsigned i;
- *allocated = false;
-
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -602,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
p->adev->vce.filp[i] = p->filp;
p->adev->vce.img_size[i] = 0;
- *allocated = true;
+ *allocated |= 1 << i;
return i;
}
}
@@ -622,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
unsigned fb_idx = 0, bs_idx = 0;
int session_idx = -1;
- bool destroyed = false;
- bool created = false;
- bool allocated = false;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
int i, r = 0, idx = 0;
- amdgpu_vce_note_usage(p->adev);
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
@@ -641,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
}
- if (destroyed) {
- DRM_ERROR("No other command allowed after destroy!\n");
- r = -EINVAL;
- goto out;
- }
-
switch (cmd) {
- case 0x00000001: // session
+ case 0x00000001: /* session */
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
- if (session_idx < 0)
- return session_idx;
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
size = &p->adev->vce.img_size[session_idx];
break;
- case 0x00000002: // task info
+ case 0x00000002: /* task info */
fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
break;
- case 0x01000001: // create
- created = true;
- if (!allocated) {
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
@@ -675,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
8 * 3 / 2;
break;
- case 0x04000001: // config extension
- case 0x04000002: // pic control
- case 0x04000005: // rate control
- case 0x04000007: // motion estimation
- case 0x04000008: // rdo
- case 0x04000009: // vui
- case 0x05000002: // auxiliary buffer
+ case 0x04000001: /* config extension */
+ case 0x04000002: /* pic control */
+ case 0x04000005: /* rate control */
+ case 0x04000007: /* motion estimation */
+ case 0x04000008: /* rdo */
+ case 0x04000009: /* vui */
+ case 0x05000002: /* auxiliary buffer */
break;
- case 0x03000001: // encode
+ case 0x03000001: /* encode */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
*size, 0);
if (r)
@@ -696,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
- case 0x02000001: // destroy
- destroyed = true;
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
break;
- case 0x05000001: // context buffer
+ case 0x05000001: /* context buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
*size * 2, 0);
if (r)
goto out;
break;
- case 0x05000004: // video bitstream buffer
+ case 0x05000004: /* video bitstream buffer */
tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
tmp, bs_idx);
@@ -715,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
- case 0x05000005: // feedback buffer
+ case 0x05000005: /* feedback buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
4096, fb_idx);
if (r)
@@ -737,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
idx += len / 4;
}
- if (allocated && !created) {
+ if (allocated & ~created) {
DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
- if ((!r && destroyed) || (r && allocated)) {
- /*
- * IB contains a destroy msg or we have allocated an
- * handle and got an error, anyway free the handle
- */
- for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
- atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
}
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
return r;
}
@@ -837,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
* @ring: the engine to test on
*
*/
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct fence *fence = NULL;
- int r;
+ long r;
/* skip vce ring1 ib test for now, since it's not reliable */
if (ring == &ring->adev->vce.ring[1])
@@ -848,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
goto error;
}
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
goto error;
}
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
}
error:
fence_put(fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index f40cf761c66f..63f83d0d985c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30ba11..bf56f1814437 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -50,19 +51,22 @@
* SI supports 16.
*/
-/* Special value that no flush is necessary */
-#define AMDGPU_VM_NO_FLUSH (~0ll)
-
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
+ /* amdgpu device we do this update for */
+ struct amdgpu_device *adev;
/* address where to copy page table entries from */
uint64_t src;
- /* DMA addresses to use for mapping */
- dma_addr_t *pages_addr;
/* indirect buffer to fill with commands */
struct amdgpu_ib *ib;
+ /* Function which actually does the update */
+ void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint32_t flags);
+ /* indicate update pt or its shadow */
+ bool shadow;
};
/**
@@ -114,16 +118,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
/**
* amdgpu_vm_get_bos - add the vm BOs to a duplicates list
*
+ * @adev: amdgpu device pointer
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
* Add the page directory to the BO duplicates list
* for command submission.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates)
{
+ uint64_t num_evictions;
unsigned i;
+ /* We only need to validate the page tables
+ * if they aren't already valid.
+ */
+ num_evictions = atomic64_read(&adev->num_evictions);
+ if (num_evictions == vm->last_eviction_counter)
+ return;
+
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
@@ -162,6 +176,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_unlock(&glob->lru_lock);
}
+static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter) ? true : false;
+}
+
/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
@@ -174,18 +195,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr)
+ struct amdgpu_job *job)
{
- uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
+ uint64_t fence_context = adev->fence_context + ring->idx;
struct fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id;
- unsigned i = ring->idx;
- int r;
+ struct amdgpu_vm_id *id, *idle;
+ struct fence **fences;
+ unsigned i;
+ int r = 0;
+
+ fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
+ GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
mutex_lock(&adev->vm_manager.lock);
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find a idle VMID to use, wait till one becomes available */
+ if (&idle->list == &adev->vm_manager.ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct fence_array *array;
+ unsigned j;
+
+ for (j = 0; j < i; ++j)
+ fence_get(fences[j]);
+
+ array = fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ fence_put(fences[j]);
+ kfree(fences);
+ r = -ENOMEM;
+ goto error;
+ }
+
+
+ r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+ fence_put(&array->base);
+ if (r)
+ goto error;
+
+ mutex_unlock(&adev->vm_manager.lock);
+ return 0;
+
+ }
+ kfree(fences);
+
+ job->vm_needs_flush = true;
/* Check if we can use a VMID already assigned to this VM */
+ i = ring->idx;
do {
struct fence *flushed;
@@ -196,67 +266,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
+ if (amdgpu_vm_is_gpu_reset(adev, id))
+ continue;
if (atomic64_read(&id->owner) != vm->client_id)
continue;
- if (pd_addr != id->pd_gpu_addr)
+ if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
- if (id->last_user != ring &&
- (!id->last_flush || !fence_is_signaled(id->last_flush)))
+ if (!id->last_flush)
continue;
- flushed = id->flushed_updates;
- if (updates && (!flushed || fence_is_later(updates, flushed)))
+ if (id->last_flush->context != fence_context &&
+ !fence_is_signaled(id->last_flush))
continue;
- /* Good we can use this VMID */
- if (id->last_user == ring) {
- r = amdgpu_sync_fence(ring->adev, sync,
- id->first);
- if (r)
- goto error;
- }
+ flushed = id->flushed_updates;
+ if (updates &&
+ (!flushed || fence_is_later(updates, flushed)))
+ continue;
- /* And remember this submission as user of the VMID */
+ /* Good we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ job->vm_needs_flush = false;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job);
mutex_unlock(&adev->vm_manager.lock);
return 0;
} while (i != ring->idx);
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
-
- if (!amdgpu_sync_is_idle(&id->active)) {
- struct list_head *head = &adev->vm_manager.ids_lru;
- struct amdgpu_vm_id *tmp;
-
- list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
- list) {
- if (amdgpu_sync_is_idle(&id->active)) {
- list_move(&id->list, head);
- head = &id->list;
- }
- }
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
- }
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
- r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
@@ -269,22 +324,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
- id->pd_gpu_addr = pd_addr;
-
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
- id->last_user = ring;
atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = pd_addr;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job);
error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
+static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ const struct amdgpu_ip_block_version *ip_block;
+
+ if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ /* only compute rings */
+ return false;
+
+ ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (!ip_block)
+ return false;
+
+ if (ip_block->major <= 7) {
+ /* gfx7 has no workaround */
+ return true;
+ } else if (ip_block->major == 8) {
+ if (adev->gfx.mec_fw_version >= 673)
+ /* gfx8 is fixed in MEC firmware 673 */
+ return false;
+ else
+ return true;
+ }
+ return false;
+}
+
/**
* amdgpu_vm_flush - hardware flush the vm
*
@@ -294,59 +373,52 @@ error:
*
* Emit a VM flush when it is necessary.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != gds_base ||
- id->gds_size != gds_size ||
- id->gws_base != gws_base ||
- id->gws_size != gws_size ||
- id->oa_base != oa_base ||
- id->oa_size != oa_size);
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+ id->gws_base != job->gws_base ||
+ id->gws_size != job->gws_size ||
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size);
int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
- ring->type == AMDGPU_RING_TYPE_COMPUTE))
+ job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)))
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush &&
- pd_addr != AMDGPU_VM_NO_FLUSH) {
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
struct fence *fence;
- trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
- amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+ trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
mutex_lock(&adev->vm_manager.lock);
- if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
- r = amdgpu_fence_emit(ring, &fence);
- if (r) {
- mutex_unlock(&adev->vm_manager.lock);
- return r;
- }
- fence_put(id->last_flush);
- id->last_flush = fence;
- }
+ fence_put(id->last_flush);
+ id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
- id->gds_base = gds_base;
- id->gds_size = gds_size;
- id->gws_base = gws_base;
- id->gws_size = gws_size;
- id->oa_base = oa_base;
- id->oa_size = oa_size;
- amdgpu_ring_emit_gds_switch(ring, vm_id,
- gds_base, gds_size,
- gws_base, gws_size,
- oa_base, oa_size);
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
}
return 0;
@@ -398,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_update_pages - helper to call the right asic function
+ * amdgpu_vm_do_set_ptes - helper to call the right asic function
*
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -411,35 +482,47 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* Traces the parameters and calls the right asic functions
* to setup the page table using the DMA.
*/
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint32_t flags)
+static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint32_t flags)
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
- if (vm_update_params->src) {
- amdgpu_vm_copy_pte(adev, vm_update_params->ib,
- pe, (vm_update_params->src + (addr >> 12) * 8), count);
-
- } else if (vm_update_params->pages_addr) {
- amdgpu_vm_write_pte(adev, vm_update_params->ib,
- vm_update_params->pages_addr,
- pe, addr, count, incr, flags);
-
- } else if (count < 3) {
- amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
- count, incr, flags);
+ if (count < 3) {
+ amdgpu_vm_write_pte(params->adev, params->ib, pe,
+ addr | flags, count, incr);
} else {
- amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
+ amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
count, incr, flags);
}
}
/**
+ * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint32_t flags)
+{
+ trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+
+ amdgpu_vm_copy_pte(params->adev, params->ib, pe,
+ (params->src + (addr >> 12) * 8), count);
+}
+
+/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
* @adev: amdgpu_device pointer
@@ -454,12 +537,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
unsigned entries;
uint64_t addr;
int r;
- memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -477,9 +559,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
- vm_update_params.ib = &job->ibs[0];
- amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
- 0, 0);
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.ib = &job->ibs[0];
+ amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > 64);
@@ -508,55 +591,41 @@ error:
* Look up the physical address of the page that the pte resolves
* to and return the pointer for the page table entry.
*/
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
- if (pages_addr) {
- /* page table offset */
- result = pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ /* page table offset */
+ result = pages_addr[addr >> PAGE_SHIFT];
- } else {
- /* No mapping required */
- result = addr;
- }
+ /* in case cpu page size != gpu page size*/
+ result |= addr & (~PAGE_MASK);
result &= 0xFFFFFFFFFFFFF000ULL;
return result;
}
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ bool shadow)
{
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = vm->page_directory;
- uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+ struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+ vm->page_directory;
+ uint64_t pd_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
struct fence *fence = NULL;
int r;
- memset(&vm_update_params, 0, sizeof(vm_update_params));
+ if (!pd)
+ return 0;
+ pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
@@ -569,7 +638,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
return r;
- vm_update_params.ib = &job->ibs[0];
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -580,19 +651,25 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
continue;
pt = amdgpu_bo_gpu_offset(bo);
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
+ if (!shadow) {
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+ vm->page_tables[pt_idx].addr = pt;
+ } else {
+ if (vm->page_tables[pt_idx].shadow_addr == pt)
+ continue;
+ vm->page_tables[pt_idx].shadow_addr = pt;
+ }
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt)) {
+ ((last_pt + incr * count) != pt) ||
+ (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
- amdgpu_vm_update_pages(adev, &vm_update_params,
- last_pde, last_pt,
- count, incr,
- AMDGPU_PTE_VALID);
+ amdgpu_vm_do_set_ptes(&params, last_pde,
+ last_pt, count, incr,
+ AMDGPU_PTE_VALID);
}
count = 1;
@@ -604,15 +681,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, &vm_update_params,
- last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
+ amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
- if (vm_update_params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+ if (params.ib->length_dw != 0) {
+ amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(vm_update_params.ib->length_dw > ndw);
+ WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
@@ -634,21 +710,135 @@ error_free:
return r;
}
+/*
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ int r;
+
+ r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+ if (r)
+ return r;
+ return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
/**
+ * amdgpu_vm_update_ptes - make sure that page tables are valid
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to, the next dst inside the function
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end.
+ */
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+
+ uint64_t cur_pe_start, cur_nptes, cur_dst;
+ uint64_t addr; /* next GPU address to be updated */
+ uint64_t pt_idx;
+ struct amdgpu_bo *pt;
+ unsigned nptes; /* next number of ptes to be updated */
+ uint64_t next_pe_start;
+
+ /* initialize the variables */
+ addr = start;
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
+ if (params->shadow) {
+ if (!pt->shadow)
+ return;
+ pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ }
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+ cur_pe_start = amdgpu_bo_gpu_offset(pt);
+ cur_pe_start += (addr & mask) * 8;
+ cur_nptes = nptes;
+ cur_dst = dst;
+
+ /* for next ptb*/
+ addr += nptes;
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+
+ /* walk over the address space and update the page tables */
+ while (addr < end) {
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
+ if (params->shadow) {
+ if (!pt->shadow)
+ return;
+ pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ }
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+ next_pe_start = amdgpu_bo_gpu_offset(pt);
+ next_pe_start += (addr & mask) * 8;
+
+ if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
+ ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
+ /* The next ptb is consecutive to current ptb.
+ * Don't call the update function now.
+ * Will update two ptbs together in future.
+ */
+ cur_nptes += nptes;
+ } else {
+ params->func(params, cur_pe_start, cur_dst, cur_nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+
+ cur_pe_start = next_pe_start;
+ cur_nptes = nptes;
+ cur_dst = dst;
+ }
+
+ /* for next ptb*/
+ addr += nptes;
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+ }
+
+ params->func(params, cur_pe_start, cur_dst, cur_nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+}
+
+/*
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @pe_start: first PTE to handle
- * @pe_end: last PTE to handle
- * @addr: addr those PTEs should point to
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
* @flags: hw mapping flags
*/
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
- uint64_t pe_start, uint64_t pe_end,
- uint64_t addr, uint32_t flags)
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
{
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
@@ -669,117 +859,48 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* allocation size to the fragment size.
*/
- /* SI and newer are optimized for 64KB */
- uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
- uint64_t frag_align = 0x80;
+ const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
- uint64_t frag_start = ALIGN(pe_start, frag_align);
- uint64_t frag_end = pe_end & ~(frag_align - 1);
+ uint64_t frag_start = ALIGN(start, frag_align);
+ uint64_t frag_end = end & ~(frag_align - 1);
- unsigned count;
-
- /* Abort early if there isn't anything to do */
- if (pe_start == pe_end)
- return;
+ uint32_t frag;
/* system pages are non continuously */
- if (vm_update_params->src || vm_update_params->pages_addr ||
- !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+ if (params->src || !(flags & AMDGPU_PTE_VALID) ||
+ (frag_start >= frag_end)) {
- count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
- addr, count, AMDGPU_GPU_PAGE_SIZE,
- flags);
+ amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
return;
}
+ /* use more than 64KB fragment size if possible */
+ frag = lower_32_bits(frag_start | frag_end);
+ frag = likely(frag) ? __ffs(frag) : 31;
+
/* handle the 4K area at the beginning */
- if (pe_start != frag_start) {
- count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
- count, AMDGPU_GPU_PAGE_SIZE, flags);
- addr += AMDGPU_GPU_PAGE_SIZE * count;
+ if (start != frag_start) {
+ amdgpu_vm_update_ptes(params, vm, start, frag_start,
+ dst, flags);
+ dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
}
/* handle the area in the middle */
- count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
+ amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+ flags | AMDGPU_PTE_FRAG(frag));
/* handle the 4K area at the end */
- if (frag_end != pe_end) {
- addr += AMDGPU_GPU_PAGE_SIZE * count;
- count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
- count, AMDGPU_GPU_PAGE_SIZE, flags);
- }
-}
-
-/**
- * amdgpu_vm_update_ptes - make sure that page tables are valid
- *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end.
- */
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
- struct amdgpu_vm *vm,
- uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
-{
- const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
-
- uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
- uint64_t addr;
-
- /* walk over the address space and update the page tables */
- for (addr = start; addr < end; ) {
- uint64_t pt_idx = addr >> amdgpu_vm_block_size;
- struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
- unsigned nptes;
- uint64_t pe_start;
-
- if ((addr & ~mask) == (end & ~mask))
- nptes = end - addr;
- else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
-
- pe_start = amdgpu_bo_gpu_offset(pt);
- pe_start += (addr & mask) * 8;
-
- if (last_pe_end != pe_start) {
-
- amdgpu_vm_frag_ptes(adev, vm_update_params,
- last_pe_start, last_pe_end,
- last_dst, flags);
-
- last_pe_start = pe_start;
- last_pe_end = pe_start + 8 * nptes;
- last_dst = dst;
- } else {
- last_pe_end += 8 * nptes;
- }
-
- addr += nptes;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+ if (frag_end != end) {
+ dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
+ amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
}
-
- amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
- last_pe_end, last_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
@@ -793,6 +914,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
@@ -804,14 +926,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
struct fence *f = NULL;
int r;
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.src = src;
+
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- memset(&vm_update_params, 0, sizeof(vm_update_params));
- vm_update_params.src = src;
- vm_update_params.pages_addr = pages_addr;
+
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.src = src;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -828,30 +955,56 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if (vm_update_params.src) {
+ if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (vm_update_params.pages_addr) {
- /* header for write data commands */
- ndw += ncmds * 4;
+ params.func = amdgpu_vm_do_copy_ptes;
+
+ } else if (pages_addr) {
+ /* copy commands needed */
+ ndw += ncmds * 7;
- /* body of write data command */
+ /* and also PTEs */
ndw += nptes * 2;
+ params.func = amdgpu_vm_do_copy_ptes;
+
} else {
/* set page commands needed */
ndw += ncmds * 10;
/* two extra commands for begin/end of fragment */
ndw += 2 * 10;
+
+ params.func = amdgpu_vm_do_set_ptes;
}
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
- vm_update_params.ib = &job->ibs[0];
+ params.ib = &job->ibs[0];
+
+ if (!src && pages_addr) {
+ uint64_t *pte;
+ unsigned i;
+
+ /* Put the PTEs at the end of the IB. */
+ i = ndw - nptes * 2;
+ pte= (uint64_t *)&(job->ibs->ptr[i]);
+ params.src = job->ibs->gpu_addr + i * 4;
+
+ for (i = 0; i < nptes; ++i) {
+ pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
+ AMDGPU_GPU_PAGE_SIZE);
+ pte[i] |= flags;
+ }
+ }
+
+ r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+ if (r)
+ goto error_free;
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
@@ -862,11 +1015,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags);
+ params.shadow = true;
+ amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+ params.shadow = false;
+ amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
- amdgpu_ring_pad_ib(ring, vm_update_params.ib);
- WARN_ON(vm_update_params.ib->length_dw > ndw);
+ amdgpu_ring_pad_ib(ring, params.ib);
+ WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
@@ -889,6 +1044,7 @@ error_free:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
@@ -902,6 +1058,7 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
@@ -932,7 +1089,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
addr += mapping->offset;
if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ return amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
@@ -940,7 +1098,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
- r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -958,27 +1117,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true, clear the entries
*
* Fill in the page table entries for @bo_va.
* Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ bool clear)
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct ttm_mem_reg *mem;
+ struct fence *exclusive;
uint64_t addr;
int r;
- if (mem) {
+ if (clear) {
+ mem = NULL;
+ addr = 0;
+ exclusive = NULL;
+ } else {
struct ttm_dma_tt *ttm;
+ mem = &bo_va->bo->tbo.mem;
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
@@ -994,8 +1158,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
default:
break;
}
- } else {
- addr = 0;
+
+ exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1007,7 +1171,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+ gtt_flags, pages_addr, vm,
mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
@@ -1025,7 +1190,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
- if (!mem)
+ if (clear)
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
@@ -1054,7 +1219,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+ r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
0, 0, NULL);
kfree(mapping);
if (r)
@@ -1088,7 +1253,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, true);
if (r)
return r;
@@ -1233,7 +1398,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1432,7 +1598,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1445,6 +1612,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unreserve(vm->page_directory);
if (r)
goto error_free_page_directory;
+ vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
return 0;
@@ -1487,10 +1655,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
kfree(mapping);
}
- for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
+ for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+ if (vm->page_tables[i].entry.robj &&
+ vm->page_tables[i].entry.robj->shadow)
+ amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+ }
drm_free_large(vm->page_tables);
+ if (vm->page_directory->shadow)
+ amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
}
@@ -1516,6 +1690,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
+ adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 7f85c2c1d681..f81068ba4cc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
/* timeout */
if (args.v2.ucReplyStatus == 1) {
- DRM_DEBUG_KMS("dp_aux_ch timeout\n");
r = -ETIMEDOUT;
goto done;
}
@@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
- for (i = 0; i < 7; i++) {
- ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
- DP_DPCD_SIZE);
- if (ret == DP_DPCD_SIZE) {
- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
+ msg, DP_DPCD_SIZE);
+ if (ret == DP_DPCD_SIZE) {
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
- dig_connector->dpcd);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
- amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+ amdgpu_atombios_dp_probe_oui(amdgpu_connector);
- return 0;
- }
+ return 0;
}
+
dig_connector->dpcd[0] = 0;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 48b6bd671cda..c32eca26155c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5ec1f1e9c983..a0d63a293bb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -50,7 +50,9 @@
#include "gmc/gmc_7_1_sh_mask.h"
MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
+#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
+#endif
static const struct ci_pt_defaults defaults_saturn_xt =
{
@@ -98,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt =
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
+#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
+#endif
static const struct ci_pt_config_reg didt_config_ci[] =
{
@@ -736,19 +742,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = ci_program_pt_config_registers(adev, didt_config_ci);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
ci_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
@@ -3030,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(memory_clock <= pi->mclk_stutter_mode_threshold) &&
- (pi->uvd_enabled == false) &&
+ (!pi->uvd_enabled) &&
(RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2))
memory_level->StutterEnable = true;
@@ -3636,6 +3642,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
ci_setup_default_pcie_tables(adev);
+ /* save a copy of the default DPM table */
+ memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
+ sizeof(struct ci_dpm_table));
+
return 0;
}
@@ -5754,13 +5764,22 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- chip_name = "bonaire";
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f))
+ chip_name = "bonaire_k";
+ else
+ chip_name = "bonaire";
break;
case CHIP_HAWAII:
- chip_name = "hawaii";
+ if (adev->pdev->revision == 0x80)
+ chip_name = "hawaii_k";
+ else
+ chip_name = "hawaii";
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
default: BUG();
}
@@ -5855,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
pi->mclk_strobe_mode_threshold = 40000;
pi->mclk_stutter_mode_threshold = 40000;
@@ -6404,6 +6426,186 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
+static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type, char *buf)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMHz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMHz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = ci_get_current_pcie_speed(adev);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (adev->pm.dpm.forced_level != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!pi->sclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_MCLK:
+ if (!pi->mclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
+ if (!pi->pcie_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].sclk =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].mclk =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
@@ -6438,6 +6640,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+ .print_clock_levels = ci_dpm_print_clock_levels,
+ .force_clock_level = ci_dpm_force_clock_level,
+ .get_sclk_od = ci_dpm_get_sclk_od,
+ .set_sclk_od = ci_dpm_set_sclk_od,
+ .get_mclk_od = ci_dpm_get_mclk_od,
+ .set_mclk_od = ci_dpm_set_mclk_od,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
index faccc30c93bf..91be2996ae7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -193,6 +193,7 @@ struct ci_pt_defaults {
struct ci_power_info {
struct ci_dpm_table dpm_table;
+ struct ci_dpm_table golden_dpm_table;
u32 voltage_control;
u32 mvdd_control;
u32 vddci_control;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 910431808542..825de800b798 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,6 +67,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
/*
* Indirect registers accessor
@@ -879,7 +880,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
uint32_t tmp;
tmp = RREG32(mmCONFIG_CNTL);
- if (state == false)
+ if (!state)
tmp |= CONFIG_CNTL__VGA_DIS_MASK;
else
tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
@@ -1035,12 +1036,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -1158,10 +1159,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
+ int r = -EINVAL;
dev_info(adev->dev, "GPU pci config reset\n");
@@ -1177,14 +1179,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ r = 0;
break;
+ }
udelay(1);
}
/* does asic init need to be run first??? */
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
+
+ return r;
}
static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -1210,13 +1218,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
*/
static int cik_asic_reset(struct amdgpu_device *adev)
{
+ int r;
cik_set_bios_scratch_engine_hung(adev, true);
- cik_gpu_pci_config_reset(adev);
+ r = cik_gpu_pci_config_reset(adev);
cik_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -1700,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1768,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1836,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1904,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1972,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ if (adev->enable_virtual_display) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
}
return 0;
@@ -2014,9 +2391,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_virtual_caps = &cik_get_virtual_caps,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9dc4e24e31e7..e71cd12104b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -224,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 extra_bits = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 4)
- next_rptr++;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1); /* number of DWs to follow */
- amdgpu_ring_write(ring, next_rptr);
	/* IB packet must end on an 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
@@ -365,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
u32 me_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
cik_sdma_gfx_stop(adev);
cik_sdma_rlc_stop(adev);
}
@@ -628,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (CIK).
* Returns 0 on success, error on failure.
*/
-static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
+static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -651,11 +639,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
- ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = 1;
@@ -665,28 +654,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
@@ -708,24 +694,16 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
- SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -733,39 +711,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: value to write into the page entries
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
- SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -781,40 +747,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 933e425a8154..794c5f36ca68 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->mgcg_cgtt_local1 = 0x0;
pi->clock_slow_down_step = 25000;
pi->skip_clock_slow_down = 1;
- pi->enable_nb_ps_policy = 0;
+ pi->enable_nb_ps_policy = false;
pi->caps_power_containment = true;
pi->caps_cac = true;
pi->didt_enabled = false;
@@ -435,7 +435,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
+
pi->voting_clients = 0x00c00033;
pi->auto_thermal_throttling_enabled = true;
pi->bapm_enabled = false;
@@ -2108,29 +2112,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
/* disable clockgating so we can properly shut down the block */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+ return;
+ }
+
/* shutdown the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
- /* XXX: check for errors */
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+ return;
+ }
}
cz_update_uvd_dpm(adev, gate);
- if (pi->caps_uvd_pg)
+ if (pi->caps_uvd_pg) {
/* power off the UVD block */
- cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+ ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
+ return;
+ }
+ }
} else {
if (pi->caps_uvd_pg) {
/* power on the UVD block */
if (pi->uvd_dynamic_pg)
- cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+ ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
else
- cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+ ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n");
+ return;
+ }
+
/* re-init the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n");
+ return;
+ }
+
/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
- /* XXX: check for errors */
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
+ return;
+ }
}
cz_update_uvd_dpm(adev, gate);
}
@@ -2219,6 +2252,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
} else { /*pi->caps_vce_pg*/
+ pi->vce_power_gated = gate;
cz_update_vce_dpm(adev);
cz_enable_vce_dpm(adev, !gate);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2ff6..bc5bb4eb9625 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -646,8 +646,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
if (save->crtc_enabled[i]) {
tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+ if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
+ tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
@@ -712,6 +712,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ num_crtc = 6;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v10_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v10_0_set_vga_render_state(adev, false);
+
+ /* Disable CRTCs */
+ for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -2071,6 +2110,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2182,8 +2222,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2275,8 +2316,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2667,19 +2708,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v10_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2696,7 +2739,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.gamma_set = dce_v10_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2717,13 +2760,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v10_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2960,10 +3003,11 @@ static int dce_v10_0_early_init(void *handle)
dce_v10_0_set_display_funcs(adev);
dce_v10_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_TONGA:
- adev->mode_info.num_crtc = 6; /* XXX 7??? */
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
@@ -3139,11 +3183,26 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
+static int dce_v10_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (dce_v10_0_is_display_hung(adev))
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
+ else
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
+
+ return 0;
+}
+
static int dce_v10_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
+ return 0;
+
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3372,7 +3431,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
@@ -3510,6 +3569,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.resume = dce_v10_0_resume,
.is_idle = dce_v10_0_is_idle,
.wait_for_idle = dce_v10_0_wait_for_idle,
+ .check_soft_reset = dce_v10_0_check_soft_reset,
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index 1bfa48ddd8a6..e3dc04d293e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+void dce_v10_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc59d..b93eba077950 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
- /* flip at hsync for async, default is vsync */
- /* use UPDATE_IMMEDIATE_EN instead for async? */
+ /* flip immediate for async, default is vsync */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+ GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -674,6 +673,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ num_crtc = 3;
+ break;
+ case CHIP_STONEY:
+ num_crtc = 2;
+ break;
+ case CHIP_POLARIS10:
+ num_crtc = 6;
+ break;
+ case CHIP_POLARIS11:
+ num_crtc = 5;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v11_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v11_0_set_vga_render_state(adev, false);
+
+ /* Disable CRTCs */
+ for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -2047,6 +2093,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2158,8 +2205,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2251,8 +2299,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2678,19 +2726,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v11_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2707,7 +2757,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.gamma_set = dce_v11_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2728,13 +2778,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v11_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2998,24 +3048,22 @@ static int dce_v11_0_early_init(void *handle)
dce_v11_0_set_display_funcs(adev);
dce_v11_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_CARRIZO:
- adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_STONEY:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
- adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3433,7 +3481,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 84e4618f5253..1f58a65ba2ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a6ef..abd5213dfe18 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
+#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ /* this is correct only for RGB; black is 0 */
+ WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
+ mdelay(20);
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
@@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 tmp, frame_count;
- int i, j;
+ u32 tmp;
+ int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
}
+ mdelay(20);
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
@@ -657,6 +604,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ num_crtc = 6;
+ break;
+ case CHIP_KAVERI:
+ num_crtc = 4;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v8_0_disable_dce(struct amdgpu_device *adev)
+{
+ /*Disable VGA render and enabled crtc, if has DCE engine*/
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v8_0_set_vga_render_state(adev, false);
+
+ /*Disable crtc*/
+ for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -1554,13 +1547,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- value = (sad->channels <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
- (sad->byte2 <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
- (sad->freq <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
- max_channels = sad->channels;
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+ max_channels = sad->channels;
}
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
@@ -1666,7 +1659,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
- WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+ WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
@@ -1746,6 +1739,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
+
offset = dig->afmt->offset;
/* hdmi deep color mode general control packets setup, if bpc > 8 */
@@ -1870,7 +1864,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
- HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
(2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
@@ -1879,13 +1873,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
- /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
- /* enable audio after to setting up hw */
+ /* enable audio after setting up hw */
dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}
@@ -2005,6 +1998,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2052,7 +2046,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
- (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
@@ -2109,8 +2103,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2190,8 +2185,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2574,19 +2569,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v8_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2603,7 +2600,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.gamma_set = dce_v8_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2624,13 +2621,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v8_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2704,7 +2701,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL2:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_PPLL0:
/* disable the ppll */
@@ -2854,21 +2851,20 @@ static int dce_v8_0_early_init(void *handle)
dce_v8_0_set_display_funcs(adev);
dce_v8_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_KAVERI:
- adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6; /* ? */
break;
@@ -3287,7 +3283,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
drm_handle_vblank(adev->ddev, crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
@@ -3296,7 +3291,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
@@ -3376,7 +3370,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 77016852b252..7d0770c3a49b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+void dce_v8_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
new file mode 100644
index 000000000000..00663a7b4053
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -0,0 +1,806 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "dce_virtual.h"
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+
+/**
+ * dce_virtual_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+ return;
+}
+
+static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ return 0;
+}
+
+static void dce_virtual_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base, bool async)
+{
+ return;
+}
+
+static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ *vbl = 0;
+ *position = 0;
+
+ return -EINVAL;
+}
+
+static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ return true;
+}
+
+static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ return;
+}
+
+static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
+{
+ return false;
+}
+
+void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ dce_v8_0_disable_dce(adev);
+#endif
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ dce_v10_0_disable_dce(adev);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ dce_v11_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+ /* no DCE */
+ return;
+ default:
+ DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+ }
+
+ return;
+}
+void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ return;
+}
+
+void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ return;
+}
+
+/**
+ * dce_virtual_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
+{
+ return;
+}
+
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t size)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int i;
+
+ /* userspace palettes are always correct as is */
+ for (i = 0; i < size; i++) {
+ amdgpu_crtc->lut_r[i] = red[i] >> 6;
+ amdgpu_crtc->lut_g[i] = green[i] >> 6;
+ amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+ }
+
+ return 0;
+}
+
+static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
+ .cursor_set2 = NULL,
+ .cursor_move = NULL,
+ .gamma_set = dce_virtual_crtc_gamma_set,
+ .set_config = amdgpu_crtc_set_config,
+ .destroy = dce_virtual_crtc_destroy,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ amdgpu_crtc->enabled = true;
+ /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
+ amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ amdgpu_crtc->enabled = false;
+ break;
+ }
+}
+
+
+static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
+{
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
+{
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ int r;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_bo *rbo;
+
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ else {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ }
+ }
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+}
+
+static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ /* update the hw version fpr dpm */
+ amdgpu_crtc->hw_mode = *adjusted_mode;
+
+ return 0;
+}
+
+static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ amdgpu_crtc->encoder = encoder;
+ amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+
+static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return 0;
+}
+
+static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
+{
+ return;
+}
+
+static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
+ .dpms = dce_virtual_crtc_dpms,
+ .mode_fixup = dce_virtual_crtc_mode_fixup,
+ .mode_set = dce_virtual_crtc_mode_set,
+ .mode_set_base = dce_virtual_crtc_set_base,
+ .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
+ .prepare = dce_virtual_crtc_prepare,
+ .commit = dce_virtual_crtc_commit,
+ .load_lut = dce_virtual_crtc_load_lut,
+ .disable = dce_virtual_crtc_disable,
+};
+
+static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ int i;
+
+ amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+ (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (amdgpu_crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+ amdgpu_crtc->crtc_id = index;
+ adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+ for (i = 0; i < 256; i++) {
+ amdgpu_crtc->lut_r[i] = i << 2;
+ amdgpu_crtc->lut_g[i] = i << 2;
+ amdgpu_crtc->lut_b[i] = i << 2;
+ }
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int dce_virtual_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+ dce_virtual_set_display_funcs(adev);
+ dce_virtual_set_irq_funcs(adev);
+
+ adev->mode_info.num_crtc = 1;
+ adev->mode_info.num_hpd = 1;
+ adev->mode_info.num_dig = 1;
+ return 0;
+}
+
+static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_i2c_bus_rec ddc_bus;
+ struct amdgpu_router router;
+ struct amdgpu_hpd hpd;
+
+ /* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = AMDGPU_HPD_NONE;
+ /* needed for aux chan transactions */
+ ddc_bus.hpd = hpd.hpd;
+
+ memset(&router, 0, sizeof(router));
+ router.ddc_valid = false;
+ router.cd_valid = false;
+ amdgpu_display_add_connector(adev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
+ CONNECTOR_OBJECT_ID_VIRTUAL,
+ &hpd,
+ &router);
+
+ amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 0);
+
+ amdgpu_link_encoder_connector(adev->ddev);
+
+ return true;
+}
+
+static int dce_virtual_sw_init(void *handle)
+{
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+ if (r)
+ return r;
+
+ adev->ddev->max_vblank_count = 0;
+
+ adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ /* allocate crtcs */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = dce_virtual_crtc_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ dce_virtual_get_connector_info(adev);
+ amdgpu_print_display_setup(adev->ddev);
+
+ drm_kms_helper_poll_init(adev->ddev);
+
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
+}
+
+static int dce_virtual_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+
+ drm_kms_helper_poll_fini(adev->ddev);
+
+ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+ return 0;
+}
+
+static int dce_virtual_hw_init(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_hw_fini(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_suspend(void *handle)
+{
+ return dce_virtual_hw_fini(handle);
+}
+
+static int dce_virtual_resume(void *handle)
+{
+ int ret;
+
+ ret = dce_virtual_hw_init(handle);
+
+ return ret;
+}
+
+static bool dce_virtual_is_idle(void *handle)
+{
+ return true;
+}
+
+static int dce_virtual_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dce_virtual_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs dce_virtual_ip_funcs = {
+ .name = "dce_virtual",
+ .early_init = dce_virtual_early_init,
+ .late_init = NULL,
+ .sw_init = dce_virtual_sw_init,
+ .sw_fini = dce_virtual_sw_fini,
+ .hw_init = dce_virtual_hw_init,
+ .hw_fini = dce_virtual_hw_fini,
+ .suspend = dce_virtual_suspend,
+ .resume = dce_virtual_resume,
+ .is_idle = dce_virtual_is_idle,
+ .wait_for_idle = dce_virtual_wait_for_idle,
+ .soft_reset = dce_virtual_soft_reset,
+ .set_clockgating_state = dce_virtual_set_clockgating_state,
+ .set_powergating_state = dce_virtual_set_powergating_state,
+};
+
+/* these are handled by the primary encoders */
+static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void
+dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return;
+}
+
+static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void
+dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ return;
+}
+
+static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ /* set the active encoder to connector routing */
+ amdgpu_encoder_set_active_device(encoder);
+
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
+ .dpms = dce_virtual_encoder_dpms,
+ .mode_fixup = dce_virtual_encoder_mode_fixup,
+ .prepare = dce_virtual_encoder_prepare,
+ .mode_set = dce_virtual_encoder_mode_set,
+ .commit = dce_virtual_encoder_commit,
+ .disable = dce_virtual_encoder_disable,
+};
+
+static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+ .destroy = dce_virtual_encoder_destroy,
+};
+
+static void dce_virtual_encoder_add(struct amdgpu_device *adev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->encoder_enum == encoder_enum) {
+ amdgpu_encoder->devices |= supported_device;
+ return;
+ }
+
+ }
+
+ /* add a new one */
+ amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return;
+
+ encoder = &amdgpu_encoder->base;
+ encoder->possible_crtcs = 0x1;
+ amdgpu_encoder->enc_priv = NULL;
+ amdgpu_encoder->encoder_enum = encoder_enum;
+ amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ amdgpu_encoder->devices = supported_device;
+ amdgpu_encoder->rmx_type = RMX_OFF;
+ amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+ amdgpu_encoder->is_ext_encoder = false;
+ amdgpu_encoder->caps = caps;
+
+ drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+}
+
+static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+ .set_vga_render_state = &dce_virtual_set_vga_render_state,
+ .bandwidth_update = &dce_virtual_bandwidth_update,
+ .vblank_get_counter = &dce_virtual_vblank_get_counter,
+ .vblank_wait = &dce_virtual_vblank_wait,
+ .is_display_hung = &dce_virtual_is_display_hung,
+ .backlight_set_level = NULL,
+ .backlight_get_level = NULL,
+ .hpd_sense = &dce_virtual_hpd_sense,
+ .hpd_set_polarity = &dce_virtual_hpd_set_polarity,
+ .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
+ .page_flip = &dce_virtual_page_flip,
+ .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
+ .add_encoder = &dce_virtual_encoder_add,
+ .add_connector = &amdgpu_connector_add,
+ .stop_mc_access = &dce_virtual_stop_mc_access,
+ .resume_mc_access = &dce_virtual_resume_mc_access,
+};
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dce_virtual_display_funcs;
+}
+
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
+ struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
+ unsigned crtc = 0;
+ drm_handle_vblank(adev->ddev, crtc);
+ dce_virtual_pageflip_irq(adev, NULL, NULL);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.vblank_timer);
+ }
+
+ adev->mode_info.vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CRTC_IRQ_VBLANK1:
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
+ int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+}
+
+static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned crtc = 0;
+ unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
+
+ dce_virtual_crtc_vblank_int_ack(adev, crtc);
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
+ }
+ dce_virtual_pageflip_irq(adev, NULL, NULL);
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ return 0;
+}
+
+static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", type);
+ return -EINVAL;
+ }
+ DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
+
+ return 0;
+}
+
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned long flags;
+ unsigned crtc_id = 0;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_flip_work *works;
+
+ crtc_id = 0;
+ amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ if (crtc_id >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+
+ /* IRQ could occur when in initial stage */
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return 0;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+ /* wakeup usersapce */
+ if (works->event)
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ schedule_work(&works->unpin_work);
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
+ .set = dce_virtual_set_crtc_irq_state,
+ .process = dce_virtual_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
+ .set = dce_virtual_set_pageflip_irq_state,
+ .process = dce_virtual_pageflip_irq,
+};
+
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
new file mode 100644
index 000000000000..e239243f6ebc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_VIRTUAL_H__
+#define __DCE_VIRTUAL_H__
+
+extern const struct amd_ip_funcs dce_virtual_ip_funcs;
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index b336c918d6a7..b3e19ba4c57f 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!fiji_is_smc_ram_running(adev))
{
- return -EINVAL;;
+ return -EINVAL;
}
if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fc8ff4d3ccf8..f4fbec3e224e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1583,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
* registers are instanced per SE or SH. 0xffffffff means
* broadcast to all SEs or SHs (CIK).
*/
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
@@ -1659,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v7_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -1746,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2050,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
@@ -2089,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
-
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- control |= ib->length_dw | (vm_id << 24);
-
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@@ -2123,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
* Provides a basic gfx ring test to verify that IBs are working.
* Returns 0 on success, error on failure.
*/
-static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
- int r;
+ long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -2154,21 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err2;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err2;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -2176,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
}
err2:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err1:
@@ -3221,7 +3199,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
}
}
adev->gfx.rlc.cs_data = ci_cs_data;
- adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
src_ptr = adev->gfx.rlc.reg_list;
dws = adev->gfx.rlc.reg_list_size;
@@ -3379,7 +3358,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3387,7 +3366,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3434,7 +3413,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
return orig;
}
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
@@ -3456,7 +3435,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
@@ -3471,7 +3450,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
*
* Halt the RLC ME (MicroEngine) (CIK).
*/
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32(mmRLC_CNTL, 0);
@@ -3547,7 +3526,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(mmRLC_LB_PARAMS, 0x00600408);
WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3587,7 +3566,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3638,7 +3617,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3689,7 +3668,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3867,6 +3846,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
}
}
+static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -4123,7 +4116,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -4183,12 +4176,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v7_0_select_se_sh,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+ .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+ .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+};
+
static int gfx_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
gfx_v7_0_set_ring_funcs(adev);
gfx_v7_0_set_irq_funcs(adev);
gfx_v7_0_set_gds_init(adev);
@@ -4460,24 +4465,21 @@ static int gfx_v7_0_sw_init(void *handle)
}
/* reserve GDS, GWS and OA resource for gfx */
- r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0,
- NULL, NULL, &adev->gds.gds_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ &adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0,
- NULL, NULL, &adev->gds.gws_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ &adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0,
- NULL, NULL, &adev->gds.oa_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ &adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -5032,16 +5034,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v7_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -5057,7 +5065,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index e747aa935c88..94e3ea147c26 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index c2ef94511f70..c6a63c2f91e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
- mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
- mmCB_HW_CONTROL_2, 0, 0x0f000000,
+ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
@@ -699,7 +703,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
- if (adev->pdev->revision == 0xc7) {
+ if (adev->pdev->revision == 0xc7 &&
+ ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
+ (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
+ (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
}
@@ -787,26 +794,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
-static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
- int r;
+ long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -818,28 +824,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err2;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
goto err2;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
err2:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err1:
@@ -1160,6 +1163,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
+static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_CARRIZO)
+ max_me = 5;
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i ++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
int r;
@@ -1168,13 +1236,23 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
adev->gfx.rlc.clear_state_obj = NULL;
}
+
+ /* jump table block */
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1213,14 +1291,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
&adev->gfx.rlc.clear_state_gpu_addr);
if (r) {
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
@@ -1231,6 +1309,45 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+ if (adev->gfx.rlc.cp_table_obj == NULL) {
+ r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.cp_table_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ return r;
+ }
+
+ cz_init_cp_jump_table(adev);
+
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ }
+
return 0;
}
@@ -1244,7 +1361,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
adev->gfx.mec.hpd_eop_obj = NULL;
}
@@ -1612,7 +1728,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
@@ -1966,24 +2081,21 @@ static int gfx_v8_0_sw_init(void *handle)
}
/* reserve GDS, GWS and OA resource for gfx */
- r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
- NULL, &adev->gds.gds_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ &adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
- NULL, &adev->gds.gws_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ &adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0, NULL,
- NULL, &adev->gds.oa_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ &adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -2011,9 +2123,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
gfx_v8_0_mec_fini(adev);
-
gfx_v8_0_rlc_fini(adev);
-
gfx_v8_0_free_microcode(adev);
return 0;
@@ -3339,23 +3449,26 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
}
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32(mmGRBM_GFX_INDEX, data);
}
@@ -3368,11 +3481,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_RB_BACKEND_DISABLE);
- data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+ data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se);
@@ -3391,13 +3503,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -3454,16 +3566,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- tmp = RREG32(mmGRBM_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
- WREG32(mmGRBM_CNTL, tmp);
-
+ WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev);
-
gfx_v8_0_setup_rb(adev);
gfx_v8_0_get_cu_info(adev);
@@ -3501,7 +3609,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3524,7 +3632,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3532,7 +3640,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3647,9 +3755,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
sizeof(indirect_start_offsets)/sizeof(int));
/* save and restore list */
- temp = RREG32(mmRLC_SRM_CNTL);
- temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
- WREG32(mmRLC_SRM_CNTL, temp);
+ WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
WREG32(mmRLC_SRM_ARAM_ADDR, 0);
for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
@@ -3686,44 +3792,46 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
- uint32_t data;
-
- data = RREG32(mmRLC_SRM_CNTL);
- data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(mmRLC_SRM_CNTL, data);
+ WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}
-static void polaris11_init_power_gating(struct amdgpu_device *adev)
+static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
- data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
- data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
- data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
- WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
-
- data = 0;
- data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
- WREG32(mmRLC_PG_DELAY, data);
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG)) {
+ WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
- data = RREG32(mmRLC_PG_DELAY_2);
- data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
- data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
- WREG32(mmRLC_PG_DELAY_2, data);
+ data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+ WREG32(mmRLC_PG_DELAY, data);
- data = RREG32(mmRLC_AUTO_PG_CTRL);
- data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
- data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
- WREG32(mmRLC_AUTO_PG_CTRL, data);
+ WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
}
}
+static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
+}
+
+static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
+}
+
+static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+}
+
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
@@ -3736,41 +3844,48 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
- if (adev->asic_type == CHIP_POLARIS11)
- polaris11_init_power_gating(adev);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
+ }
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if (adev->asic_type == CHIP_POLARIS11) {
+ gfx_v8_0_init_power_gating(adev);
+ }
}
}
void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32(mmRLC_CNTL, tmp);
+ WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-
gfx_v8_0_wait_for_rlc_serdes(adev);
}
static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmGRBM_SOFT_RESET);
-
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- WREG32(mmGRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- WREG32(mmGRBM_SOFT_RESET, tmp);
+
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
- WREG32(mmRLC_CNTL, tmp);
+ WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo do enable cp interrupt after cp inited */
if (!(adev->flags & AMD_IS_APU))
@@ -3812,14 +3927,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
+ adev->asic_type == CHIP_POLARIS10)
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
gfx_v8_0_rlc_reset(adev);
-
gfx_v8_0_init_pg(adev);
if (!adev->pp_enabled) {
@@ -4114,12 +4228,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
gfx_v8_0_cp_gfx_start(adev);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
- if (r) {
+ if (r)
ring->ready = false;
- return r;
- }
- return 0;
+ return r;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4794,7 +4906,6 @@ static int gfx_v8_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v8_0_init_golden_registers(adev);
-
gfx_v8_0_gpu_init(adev);
r = gfx_v8_0_rlc_resume(adev);
@@ -4802,8 +4913,6 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
r = gfx_v8_0_cp_resume(adev);
- if (r)
- return r;
return r;
}
@@ -4851,25 +4960,22 @@ static bool gfx_v8_0_is_idle(void *handle)
static int gfx_v8_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
-
- if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+ if (gfx_v8_0_is_idle(handle))
return 0;
+
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v8_0_soft_reset(void *handle)
+static int gfx_v8_0_check_soft_reset(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* GRBM_STATUS */
tmp = RREG32(mmGRBM_STATUS);
@@ -4878,16 +4984,12 @@ static int gfx_v8_0_soft_reset(void *handle)
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
- GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
+ GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
- }
-
- if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
- grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
- GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
}
@@ -4898,73 +5000,199 @@ static int gfx_v8_0_soft_reset(void *handle)
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
+ REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
+ REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPF, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPC, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPG, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+ SOFT_RESET_GRBM, 1);
+ }
+
/* SRBM_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- /* stop the rlc */
- gfx_v8_0_rlc_stop(adev);
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
+ adev->gfx.grbm_soft_reset = grbm_soft_reset;
+ adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
+ adev->gfx.grbm_soft_reset = 0;
+ adev->gfx.srbm_soft_reset = 0;
+ }
+
+ return 0;
+}
+
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ int i;
+
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+ u32 tmp;
+ tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+ DEQUEUE_REQ, 2);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ udelay(1);
+ }
+ }
+}
+
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ return 0;
+
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+ /* stop the rlc */
+ gfx_v8_0_rlc_stop(adev);
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
/* Disable GFX parsing/prefetching */
gfx_v8_0_cp_gfx_enable(adev, false);
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ gfx_v8_0_inactive_hqd(adev, ring);
+ }
/* Disable MEC parsing/prefetching */
gfx_v8_0_cp_compute_enable(adev, false);
+ }
- if (grbm_soft_reset || srbm_soft_reset) {
- tmp = RREG32(mmGMCON_DEBUG);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_STALL, 1);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_CLEAR, 1);
- WREG32(mmGMCON_DEBUG, tmp);
+ return 0;
+}
- udelay(50);
- }
+static int gfx_v8_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
- if (grbm_soft_reset) {
- tmp = RREG32(mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ return 0;
- udelay(50);
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
- tmp &= ~grbm_soft_reset;
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
- }
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
+ WREG32(mmGMCON_DEBUG, tmp);
+ udelay(50);
+ }
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
+ if (grbm_soft_reset) {
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ }
- if (grbm_soft_reset || srbm_soft_reset) {
- tmp = RREG32(mmGMCON_DEBUG);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_STALL, 0);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_CLEAR, 0);
- WREG32(mmGMCON_DEBUG, tmp);
- }
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ }
+
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+ WREG32(mmGMCON_DEBUG, tmp);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ return 0;
+}
+
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+ WREG32(mmCP_HQD_PQ_RPTR, 0);
+ WREG32(mmCP_HQD_PQ_WPTR, 0);
+ vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+static int gfx_v8_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ return 0;
+
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+ gfx_v8_0_cp_gfx_resume(adev);
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ gfx_v8_0_init_hqd(adev, ring);
+ }
+ gfx_v8_0_cp_compute_resume(adev);
}
+ gfx_v8_0_rlc_start(adev);
+
return 0;
}
@@ -4976,7 +5204,7 @@ static int gfx_v8_0_soft_reset(void *handle)
* Fetches a GPU clock counter snapshot.
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -5036,12 +5264,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v8_0_select_se_sh,
+};
+
static int gfx_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
gfx_v8_0_set_gds_init(adev);
@@ -5074,71 +5308,57 @@ static int gfx_v8_0_late_init(void *handle)
return 0;
}
-static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
- uint32_t data, temp;
-
- /* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
-
- if (enable) {
- /* Enable static MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
- data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
- data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+ if (adev->asic_type == CHIP_POLARIS11)
+ /* Send msg to SMU via Powerplay */
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+
+ WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
+}
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
-static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
- if (enable) {
- /* Enable dynamic MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
- data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
- data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+ WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
+}
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}
-static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
- uint32_t data, temp;
+ WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
- if (enable) {
- /* Enable quick PG */
- temp = data = RREG32(mmRLC_PG_CNTL);
- data |= 0x100000;
+ /* Read any GFX register to wake up GFX. */
+ if (!enable)
+ RREG32(mmDB_RENDER_CONTROL);
+}
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+ cz_enable_gfx_cg_power_gating(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ cz_enable_gfx_pipeline_power_gating(adev, true);
} else {
- temp = data = RREG32(mmRLC_PG_CNTL);
- data &= ~0x100000;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ cz_enable_gfx_cg_power_gating(adev, false);
+ cz_enable_gfx_pipeline_power_gating(adev, false);
}
}
@@ -5146,21 +5366,42 @@ static int gfx_v8_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+ cz_update_gfx_cg_power_gating(adev, enable);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+ break;
case CHIP_POLARIS11:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
- polaris11_enable_gfx_static_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
- else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
- polaris11_enable_gfx_dynamic_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
+ polaris11_enable_gfx_quick_mg_power_gating(adev, true);
else
- polaris11_enable_gfx_quick_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ polaris11_enable_gfx_quick_mg_power_gating(adev, false);
break;
default:
break;
@@ -5174,22 +5415,22 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
{
uint32_t data;
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RREG32(mmRLC_SERDES_WR_CTRL);
if (adev->asic_type == CHIP_STONEY)
- data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
- RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
- RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
- RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
- RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
- RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
- RLC_SERDES_WR_CTRL__POWER_UP_MASK |
- RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
- RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+ data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+ RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+ RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+ RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+ RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
else
data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
@@ -5212,10 +5453,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define MSG_ENTER_RLC_SAFE_MODE 1
#define MSG_EXIT_RLC_SAFE_MODE 0
-
-#define RLC_GPR_REG2__REQ_MASK 0x00000001
-#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
-#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__REQ__SHIFT 0
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
@@ -5245,7 +5486,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
break;
udelay(1);
}
@@ -5273,7 +5514,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
break;
udelay(1);
}
@@ -5305,7 +5546,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -5332,7 +5573,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -5373,21 +5614,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
/* 1 - RLC memory Light sleep */
- temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
- data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmRLC_MEM_SLP_CNTL, data);
- }
+ WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
- /* 2 - CP memory Light sleep */
- temp = data = RREG32(mmCP_MEM_SLP_CNTL);
- data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmCP_MEM_SLP_CNTL, data);
- }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
+ WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
}
/* 3 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5562,6 +5794,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+
adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5603,25 +5837,18 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
- u32 rptr;
-
- rptr = ring->adev->wb.wb[ring->rptr_offs];
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs];
}
static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 wptr;
if (ring->use_doorbell)
/* XXX check if swapping is necessary on BE */
- wptr = ring->adev->wb.wb[ring->wptr_offs];
+ return ring->adev->wb.wb[ring->wptr_offs];
else
- wptr = RREG32(mmCP_RB0_WPTR);
-
- return wptr;
+ return RREG32(mmCP_RB0_WPTR);
}
static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -5687,17 +5914,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
@@ -5726,28 +5942,14 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
-
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
- control |= ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
- (2 << 0) |
+ (2 << 0) |
#endif
- (ib->gpu_addr & 0xFFFFFFFC));
+ (ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
@@ -5892,33 +6094,14 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl =
- REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
int me, int pipe,
enum amdgpu_interrupt_state state)
{
- u32 mec_int_cntl, mec_int_cntl_reg;
-
/*
* amdgpu controls only pipe 0 of MEC1. That's why this function only
* handles the setting of interrupts for this specific pipe. All other
@@ -5928,7 +6111,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
if (me == 1) {
switch (pipe) {
case 0:
- mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
@@ -5939,22 +6121,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
return;
}
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
- mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
- mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
@@ -5962,24 +6130,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
@@ -5989,24 +6141,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
@@ -6112,7 +6248,10 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.resume = gfx_v8_0_resume,
.is_idle = gfx_v8_0_is_idle,
.wait_for_idle = gfx_v8_0_wait_for_idle,
+ .check_soft_reset = gfx_v8_0_check_soft_reset,
+ .pre_soft_reset = gfx_v8_0_pre_soft_reset,
.soft_reset = gfx_v8_0_soft_reset,
+ .post_soft_reset = gfx_v8_0_post_soft_reset,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
};
@@ -6195,9 +6334,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- case CHIP_STONEY:
adev->gfx.rlc.funcs = &iceland_rlc_funcs;
break;
+ case CHIP_STONEY:
case CHIP_CARRIZO:
adev->gfx.rlc.funcs = &cz_rlc_funcs;
break;
@@ -6235,19 +6374,30 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-
- data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+ RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
- return (~data) & mask;
+ return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
@@ -6255,16 +6405,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v8_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -6280,7 +6436,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 16a49f53a2fa..ebed1f829297 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,7 +26,4 @@
extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 1feb6439cb0b..aa0c4b964621 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,6 +39,7 @@
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v7_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
@@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-/**
- * gmc7_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
@@ -167,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
@@ -205,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 running, blackout = 0;
+ u32 running;
int i, ucode_size, regs_size;
if (!adev->mc.fw)
@@ -225,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
- if (running) {
- blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -261,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
break;
udelay(1);
}
-
- if (running)
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -311,7 +281,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v7_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
@@ -331,7 +301,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v7_0_mc_resume(adev, &save);
@@ -415,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -975,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_ttm_global_init(adev);
+ if (r) {
+ return r;
+ }
+
r = gmc_v7_0_mc_init(adev);
if (r)
return r;
@@ -1137,7 +1112,7 @@ static int gmc_v7_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev, &save);
- if (gmc_v7_0_wait_for_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 36fcbbc46ada..0b386b5d2f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9945d5bbf1fe..84c10d5117a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,6 +41,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -102,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
+static const u32 golden_settings_stoney_common[] =
+{
+ mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+ mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -141,50 +147,24 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ (const u32)ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
}
}
-/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
- SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC1_BUSY_MASK);
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -199,8 +179,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
@@ -281,7 +261,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 running, blackout = 0;
+ u32 running;
int i, ucode_size, regs_size;
if (!adev->mc.fw)
@@ -307,11 +287,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
- if (running) {
- blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -343,9 +318,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
break;
udelay(1);
}
-
- if (running)
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v8_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
@@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v8_0_mc_resume(adev, &save);
@@ -497,7 +469,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -977,6 +949,11 @@ static int gmc_v8_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_ttm_global_init(adev);
+ if (r) {
+ return r;
+ }
+
r = gmc_v8_0_mc_init(adev);
if (r)
return r;
@@ -1120,9 +1097,8 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_soft_reset(void *handle)
+static int gmc_v8_0_check_soft_reset(void *handle)
{
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1137,13 +1113,42 @@ static int gmc_v8_0_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
-
if (srbm_soft_reset) {
- gmc_v8_0_mc_stop(adev, &save);
- if (gmc_v8_0_wait_for_idle(adev)) {
- dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
- }
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
+ adev->mc.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
+ adev->mc.srbm_soft_reset = 0;
+ }
+ return 0;
+}
+static int gmc_v8_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ return 0;
+
+ gmc_v8_0_mc_stop(adev, &adev->mc.save);
+ if (gmc_v8_0_wait_for_idle(adev)) {
+ dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
+ }
+
+ return 0;
+}
+
+static int gmc_v8_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ return 0;
+ srbm_soft_reset = adev->mc.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
@@ -1159,14 +1164,22 @@ static int gmc_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- gmc_v8_0_mc_resume(adev, &save);
- udelay(50);
}
return 0;
}
+static int gmc_v8_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ return 0;
+
+ gmc_v8_0_mc_resume(adev, &adev->mc.save);
+ return 0;
+}
+
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -1434,7 +1447,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.resume = gmc_v8_0_resume,
.is_idle = gmc_v8_0_is_idle,
.wait_for_idle = gmc_v8_0_wait_for_idle,
+ .check_soft_reset = gmc_v8_0_check_soft_reset,
+ .pre_soft_reset = gmc_v8_0_pre_soft_reset,
.soft_reset = gmc_v8_0_soft_reset,
+ .post_soft_reset = gmc_v8_0_post_soft_reset,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index 973436086b38..fc5001a8119d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 825ccd63f2dc..2f078ad6095c 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 52ee08193295..211839913728 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -25,7 +25,7 @@
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
@@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;;
+ return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
index 1e0769e110fa..5983e3150cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
@@ -21,8 +21,8 @@
*
*/
-#ifndef ICELAND_SMUMGR_H
-#define ICELAND_SMUMGR_H
+#ifndef ICELAND_SMUM_H
+#define ICELAND_SMUM_H
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a789a863d677..f8618a3881a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
vid_mapping_table->num_entries = i;
}
+#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
{ 0, 4, 1 },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
+#endif
static const struct kv_pt_config_reg didt_config_kv[] =
{
@@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
@@ -2843,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
+
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (amdgpu_bapm == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
index 7837f2ecc357..8463245f424f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_StartFanControl ((uint8_t)0x5B)
#define PPSMC_StopFanControl ((uint8_t)0x5C)
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
+#define PPSMC_NoDisplay ((uint8_t)0x5D)
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
+#define PPSMC_HasDisplay ((uint8_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
/* CI/KV/KB */
#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index b556bd0a8797..e82229686783 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
-
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -406,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v2_4_gfx_stop(adev);
sdma_v2_4_rlc_stop(adev);
}
@@ -580,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
- if (!adev->firmware.smu_load) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA0);
- if (r)
- return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
+ if (!adev->pp_enabled) {
+ if (!adev->firmware.smu_load) {
+ r = sdma_v2_4_load_microcode(adev);
+ if (r)
+ return r;
+ } else {
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA0);
+ if (r)
+ return -EINVAL;
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA1);
+ if (r)
+ return -EINVAL;
+ }
}
/* halt the engine before programing */
@@ -679,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
-static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -702,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
@@ -721,28 +709,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
@@ -764,24 +749,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -789,39 +766,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -837,40 +802,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 532ea88da66a..bee4978bec73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -415,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -616,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v3_0_gfx_stop(adev);
sdma_v3_0_rlc_stop(adev);
}
@@ -908,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
-static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -931,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
@@ -950,27 +937,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
@@ -992,24 +976,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -1017,39 +993,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -1065,40 +1029,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
@@ -1336,28 +1281,79 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_check_soft_reset(void *handle)
{
- u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+ (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}
if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
+ adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
+ adev->sdma.srbm_soft_reset = 0;
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+ REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+ REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+ sdma_v3_0_gfx_resume(adev);
+ sdma_v3_0_rlc_resume(adev);
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1575,6 +1571,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.resume = sdma_v3_0_resume,
.is_idle = sdma_v3_0_is_idle,
.wait_for_idle = sdma_v3_0_wait_for_idle,
+ .check_soft_reset = sdma_v3_0_check_soft_reset,
+ .pre_soft_reset = sdma_v3_0_pre_soft_reset,
+ .post_soft_reset = sdma_v3_0_post_soft_reset,
.soft_reset = sdma_v3_0_soft_reset,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index c92055805a45..d127d59f953a 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_soft_reset(void *handle)
+static int tonga_ih_check_soft_reset(void *handle)
{
- u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
+ adev->irq.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
+ adev->irq.srbm_soft_reset = 0;
+ }
+
+ return 0;
+}
+
+static int tonga_ih_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ return 0;
+
+ return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ return 0;
+
+ return tonga_ih_hw_init(adev);
+}
+
+static int tonga_ih_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ return 0;
+ srbm_soft_reset = adev->irq.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = {
.resume = tonga_ih_resume,
.is_idle = tonga_ih_is_idle,
.wait_for_idle = tonga_ih_wait_for_idle,
+ .check_soft_reset = tonga_ih_check_soft_reset,
+ .pre_soft_reset = tonga_ih_pre_soft_reset,
.soft_reset = tonga_ih_soft_reset,
+ .post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 083893dd68c0..940de1836f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!tonga_is_smc_ram_running(adev))
{
- return -EINVAL;;
+ return -EINVAL;
}
if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f07551476a70..10c0407dcb6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -34,6 +34,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -114,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v4_2_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -499,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * uvd_v4_2_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct fence *fence = NULL;
- int r;
-
- r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- if (r) {
- DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
- return r;
-}
-
-/**
* uvd_v4_2_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
@@ -763,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
+ .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
.test_ring = uvd_v4_2_ring_test_ring,
- .test_ib = uvd_v4_2_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e0a76a883d46..8513376062c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -31,6 +31,7 @@
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
#include "vi.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -111,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v5_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -550,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-/**
- * uvd_v5_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct fence *fence = NULL;
- int r;
-
- r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- if (r) {
- DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
- return r;
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -815,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v5_0_ring_test_ring,
- .test_ib = uvd_v5_0_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index c9929d665c01..2abe8a93c99f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
+#include "bif/bif_5_1_d.h"
+#include "gmc/gmc_8_1_d.h"
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -114,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -385,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uint32_t mp_swap_cntl;
int i, j, r;
- /*disable DPG */
- WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+ /* disable DPG */
+ WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* disable byte swapping */
lmi_swap_cntl = 0;
@@ -394,38 +396,40 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uvd_v6_0_mc_resume(adev);
- /* Set dynamic clock gating in S/W control mode */
- if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
- uvd_v6_0_set_sw_clock_gating(adev);
- } else {
- /* disable clock gating */
- uint32_t data = RREG32(mmUVD_CGC_CTRL);
- data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, data);
- }
+ /* disable clock gating */
+ WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
/* disable interupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+ WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
/* stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ WREG32(mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
/* take UVD block out of reset */
- WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+ WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
mdelay(5);
/* initialize UVD memory controller */
- WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
+ WREG32(mmUVD_LMI_CTRL,
+ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
@@ -447,10 +451,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* enable VCPU clock */
- WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+ WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
@@ -470,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
break;
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
- WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
mdelay(10);
- WREG32_P(mmUVD_SOFT_RESET, 0,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
mdelay(10);
r = -1;
}
@@ -484,20 +486,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
return r;
}
/* enable master interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+ WREG32_P(mmUVD_MASTINT_EN,
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
- WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- /* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, tmp);
/* set the write pointer delay */
@@ -518,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
- WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+ WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
return 0;
}
@@ -581,6 +584,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -634,6 +663,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
+ amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+ amdgpu_ring_write(ring, vm_id);
+
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -642,39 +674,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-/**
- * uvd_v6_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct fence *fence = NULL;
- int r;
+ uint32_t reg;
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
+ if (vm_id < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+ else
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, pd_addr >> 12);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- return r;
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+ amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xC);
+}
+
+static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xE);
}
static bool uvd_v6_0_is_idle(void *handle)
@@ -690,20 +738,82 @@ static int uvd_v6_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+ if (uvd_v6_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
}
-static int uvd_v6_0_soft_reset(void *handle)
+#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
+static int uvd_v6_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+ REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+ (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+ if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
+ adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
+ adev->uvd.srbm_soft_reset = 0;
+ }
+ return 0;
+}
+static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ return 0;
+
uvd_v6_0_stop(adev);
+ return 0;
+}
+
+static int uvd_v6_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ return 0;
+ srbm_soft_reset = adev->uvd.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int uvd_v6_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ return 0;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);
return uvd_v6_0_start(adev);
@@ -844,20 +954,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- static int curstate = -1;
- if (adev->asic_type == CHIP_FIJI)
- uvd_v6_set_bypass_mode(adev, enable);
+ if (adev->asic_type == CHIP_FIJI ||
+ adev->asic_type == CHIP_POLARIS10)
+ uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (curstate == state)
- return 0;
-
- curstate = state;
- if (enable) {
+ if (state == AMD_CG_STATE_GATE) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -887,6 +992,8 @@ static int uvd_v6_0_set_powergating_state(void *handle,
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
+ WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
return 0;
@@ -907,27 +1014,59 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.resume = uvd_v6_0_resume,
.is_idle = uvd_v6_0_is_idle,
.wait_for_idle = uvd_v6_0_wait_for_idle,
+ .check_soft_reset = uvd_v6_0_check_soft_reset,
+ .pre_soft_reset = uvd_v6_0_pre_soft_reset,
.soft_reset = uvd_v6_0_soft_reset,
+ .post_soft_reset = uvd_v6_0_post_soft_reset,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
};
-static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v6_0_ring_test_ring,
- .test_ib = uvd_v6_0_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+};
+
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .get_rptr = uvd_v6_0_ring_get_rptr,
+ .get_wptr = uvd_v6_0_ring_get_wptr,
+ .set_wptr = uvd_v6_0_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = uvd_v6_0_ring_emit_ib,
+ .emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v6_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+ if (adev->asic_type >= CHIP_POLARIS10) {
+ adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+ DRM_INFO("UVD is enabled in VM mode\n");
+ } else {
+ adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+ DRM_INFO("UVD is enabled in physical mode\n");
+ }
}
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 45d92aceb485..5fa55b52c00e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -40,6 +40,7 @@
#define VCE_V2_0_FW_SIZE (256 * 1024)
#define VCE_V2_0_STACK_SIZE (64 * 1024)
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
+static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_LMI_STATUS);
+
+ if (status & 0x337f)
+ return 0;
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_STATUS);
+
+ if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
/**
* vce_v2_0_start - start VCE block
*
@@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
static int vce_v2_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, r;
+ int r;
vce_v2_0_mc_resume(adev);
@@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(100);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(mmVCE_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- r = -1;
- }
+ r = vce_v2_0_firmware_loaded(adev);
/* clear BUSY flag */
WREG32_P(mmVCE_STATUS, 0, ~1);
@@ -338,47 +358,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
- u32 orig, tmp;
+ if (vce_v2_0_wait_for_idle(adev)) {
+ DRM_INFO("VCE is busy, Can't set clock gateing");
+ return;
+ }
- if (gated) {
- if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
- return;
- }
- WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(100);
- WREG32(mmVCE_STATUS, 0);
- } else {
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(100);
+ WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+
+ if (vce_v2_0_lmi_clean(adev)) {
+ DRM_INFO("LMI is busy, Can't set clock gateing");
+ return;
}
- tmp = RREG32(mmVCE_CLOCK_GATING_B);
- tmp &= ~0x00060006;
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32(mmVCE_STATUS, 0);
+
+ if (gated)
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+ /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
if (gated) {
- tmp |= 0xe10000;
+ /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
+ WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
} else {
- tmp |= 0xe1;
- tmp &= ~0xe10000;
+ /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
+ WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
}
- WREG32(mmVCE_CLOCK_GATING_B, tmp);
- orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
- tmp &= ~0x1fe000;
- tmp &= ~0xff000000;
- if (tmp != orig)
- WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+ /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
+ WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
- orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- tmp &= ~0x3fc;
- if (tmp != orig)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
- if (gated)
- WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
+ if(!gated) {
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ mdelay(100);
+ WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+ vce_v2_0_firmware_loaded(adev);
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ }
}
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -458,9 +481,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
- WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
vce_v2_0_init_cg(adev);
}
@@ -474,11 +495,11 @@ static bool vce_v2_0_is_idle(void *handle)
static int vce_v2_0_wait_for_idle(void *handle)
{
- unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+ if (vce_v2_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
@@ -488,8 +509,7 @@ static int vce_v2_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+ WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
mdelay(5);
return vce_v2_0_start(adev);
@@ -516,10 +536,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
DRM_DEBUG("IH: VCE\n");
switch (entry->src_data) {
case 0:
- amdgpu_fence_process(&adev->vce.ring[0]);
- break;
case 1:
- amdgpu_fence_process(&adev->vce.ring[1]);
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -594,6 +612,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 30e8099e94c5..615b8b16ad04 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,12 +37,16 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
#define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -51,6 +55,7 @@
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
/**
* vce_v3_0_ring_get_rptr - get read pointer
@@ -105,106 +110,99 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
- u32 tmp, data;
-
- tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
- if (override)
- data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
- else
- data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
- if (tmp != data)
- WREG32(mmVCE_RB_ARB_CTRL, data);
+ WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
- u32 tmp, data;
+ u32 data;
+
/* Set Override to disable Clock Gating */
vce_v3_0_override_vce_clock_gating(adev, true);
- if (!gated) {
- /* Force CLOCK ON for VCE_CLOCK_GATING_B,
- * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
- * VREG can be FORCE ON or set to Dynamic, but can't be OFF
- */
- tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+ /* This function enables MGCG which is controlled by firmware.
+ With the clocks in the gated state the core is still
+ accessible but the firmware will throttle the clocks on the
+ fly as necessary.
+ */
+ if (gated) {
+ data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
- if (tmp != data)
- WREG32(mmVCE_CLOCK_GATING_B, data);
+ WREG32(mmVCE_CLOCK_GATING_B, data);
- /* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
- * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
- */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+ data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0x3ff000;
data &= ~0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING, data);
+ WREG32(mmVCE_UENC_CLOCK_GATING, data);
- /* set VCE_UENC_CLOCK_GATING_2 */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+ data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x2;
- data &= ~0x2;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+ data &= ~0x00010000;
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
- /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
- tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data |= 0x37f;
- if (tmp != data)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
- /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
- tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8;
- if (tmp != data)
- WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+ VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+ VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
+ 0x8;
+ WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
} else {
- /* Force CLOCK OFF for VCE_CLOCK_GATING_B,
- * {*, *_FORCE_OFF} = {*, 1}
- * set VREG to Dynamic, as it can't be OFF
- */
- tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+ data = RREG32(mmVCE_CLOCK_GATING_B);
data &= ~0x80010;
data |= 0xe70008;
- if (tmp != data)
- WREG32(mmVCE_CLOCK_GATING_B, data);
- /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
- * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
- */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+ WREG32(mmVCE_CLOCK_GATING_B, data);
+
+ data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING, data);
- /* Set VCE_UENC_CLOCK_GATING_2 */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+ WREG32(mmVCE_UENC_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x10000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
- /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
- tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
- /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
- tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8);
- if (tmp != data)
- WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+ VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+ VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
+ 0x8);
+ WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
}
vce_v3_0_override_vce_clock_gating(adev, false);
}
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_STATUS);
+
+ if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
+ mdelay(10);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
/**
* vce_v3_0_start - start VCE block
*
@@ -215,70 +213,43 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
static int vce_v3_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int idx, i, j, r;
+ int idx, r;
+
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, ring->wptr);
+ WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, ring->wptr);
+ WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
-
if (adev->vce.harvest_config & (1 << idx))
continue;
- if (idx == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
vce_v3_0_mc_resume(adev, idx);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
- /* set BUSY flag */
- WREG32_P(mmVCE_STATUS, 1, ~1);
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
else
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
- ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(100);
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(mmVCE_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- r = -1;
- }
+ r = vce_v3_0_firmware_loaded(adev);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~1);
-
- /* Set Clock-Gating off */
- if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
- vce_v3_0_set_vce_sw_clock_gating(adev, false);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
if (r) {
DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -287,22 +258,41 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- ring = &adev->vce.ring[0];
- WREG32(mmVCE_RB_RPTR, ring->wptr);
- WREG32(mmVCE_RB_WPTR, ring->wptr);
- WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+ return 0;
+}
- ring = &adev->vce.ring[1];
- WREG32(mmVCE_RB_RPTR2, ring->wptr);
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
- WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+ int idx;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (idx = 0; idx < 2; ++idx) {
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+
+ if (adev->asic_type >= CHIP_STONEY)
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+ else
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
+ /* hold on ECPU */
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
+
+ /* clear BUSY flag */
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
+
+ /* Set Clock-Gating off */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+ vce_v3_0_set_vce_sw_clock_gating(adev, false);
+ }
+
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
@@ -441,7 +431,14 @@ static int vce_v3_0_hw_init(void *handle)
static int vce_v3_0_hw_fini(void *handle)
{
- return 0;
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vce_v3_0_wait_for_idle(handle);
+ if (r)
+ return r;
+
+ return vce_v3_0_stop(adev);
}
static int vce_v3_0_suspend(void *handle)
@@ -483,7 +480,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
- WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+ WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
WREG32(mmVCE_LMI_CTRL, 0x00398000);
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
@@ -522,9 +519,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
}
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
- WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
static bool vce_v3_0_is_idle(void *handle)
@@ -550,20 +545,108 @@ static int vce_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
+#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
+#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
+#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static int vce_v3_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ /* According to VCE team , we should use VCE_STATUS instead
+ * SRBM_STATUS.VCE_BUSY bit for busy status checking.
+ * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
+ * instance's registers are accessed
+ * (0 for 1st instance, 10 for 2nd instance).
+ *
+ *VCE_STATUS
+ *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB |
+ *|----+----+-----------+----+----+----+----------+---------+----|
+ *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0|
+ *
+ * VCE team suggest use bit 3--bit 6 for busy status check
+ */
+ mutex_lock(&adev->grbm_idx_mutex);
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+ }
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+ }
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+
+ if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
+ adev->vce.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
+ adev->vce.srbm_soft_reset = 0;
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return 0;
+}
+
static int vce_v3_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 mask = 0;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ return 0;
+ srbm_soft_reset = adev->vce.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int vce_v3_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ return 0;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+ mdelay(5);
+
+ return vce_v3_0_suspend(adev);
+}
+
+
+static int vce_v3_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ return 0;
- WREG32_P(mmSRBM_SOFT_RESET, mask,
- ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
- SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
mdelay(5);
- return vce_v3_0_start(adev);
+ return vce_v3_0_resume(adev);
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -586,9 +669,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: VCE\n");
- WREG32_P(mmVCE_SYS_INT_STATUS,
- VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
- ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+ WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
switch (entry->src_data) {
case 0:
@@ -604,6 +685,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+ else
+ tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -611,6 +704,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
+ if (adev->asic_type == CHIP_POLARIS10)
+ vce_v3_set_bypass_mode(adev, enable);
+
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
@@ -620,13 +716,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
- if (i == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -645,7 +735,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -685,7 +775,10 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.resume = vce_v3_0_resume,
.is_idle = vce_v3_0_is_idle,
.wait_for_idle = vce_v3_0_wait_for_idle,
+ .check_soft_reset = vce_v3_0_check_soft_reset,
+ .pre_soft_reset = vce_v3_0_pre_soft_reset,
.soft_reset = vce_v3_0_soft_reset,
+ .post_soft_reset = vce_v3_0_post_soft_reset,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
};
@@ -701,6 +794,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index a65c96029476..f2e8aa1a0dbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,6 +77,7 @@
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
+#include "dce_virtual.h"
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
@@ -203,6 +204,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ r = RREG32(mmGC_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+ return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ WREG32(mmGC_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+
static const u32 tonga_mgcg_cgcg_init[] =
{
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
@@ -533,12 +557,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -597,7 +621,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -612,11 +636,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
- break;
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ return 0;
+ }
udelay(1);
}
-
+ return -EINVAL;
}
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -642,13 +669,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
*/
static int vi_asic_reset(struct amdgpu_device *adev)
{
+ int r;
+
vi_set_bios_scratch_engine_hung(adev, true);
- vi_gpu_pci_config_reset(adev);
+ r = vi_gpu_pci_config_reset(adev);
vi_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -794,6 +823,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -862,6 +945,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -930,6 +1081,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -998,6 +1217,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1075,34 +1362,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
#endif
};
+static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+#if defined(CONFIG_DRM_AMD_ACP)
+ {
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+ },
+#endif
+};
+
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ if (adev->enable_virtual_display) {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ adev->ip_blocks = topaz_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
+ break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
+ break;
+ case CHIP_TONGA:
+ adev->ip_blocks = tonga_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
+ break;
+
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->ip_blocks = cz_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ adev->ip_blocks = topaz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+ break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+ break;
+ case CHIP_TONGA:
+ adev->ip_blocks = tonga_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->ip_blocks = cz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
}
return 0;
@@ -1133,9 +1528,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
.get_virtual_caps = &vi_get_virtual_caps,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
static int vi_common_early_init(void *handle)
@@ -1156,6 +1548,8 @@ static int vi_common_early_init(void *handle)
adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
adev->didt_rreg = &vi_didt_rreg;
adev->didt_wreg = &vi_didt_wreg;
+ adev->gc_cac_rreg = &vi_gc_cac_rreg;
+ adev->gc_cac_wreg = &vi_gc_cac_wreg;
adev->asic_funcs = &vi_asic_funcs;
@@ -1221,20 +1615,41 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCE_MGCG;
+ /* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
+ if (adev->rev_id != 0x00) {
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_UVD |
+ AMD_PG_SUPPORT_VCE;
+ }
adev->external_rev_id = adev->rev_id + 0x1;
break;
case CHIP_STONEY:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
- adev->pg_flags = 0;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCE_MGCG;
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_UVD |
+ AMD_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default: